Columns: id (string, length 22-25), commit_message (string, length 137-6.96k), diffs (list, length 0-63)
derby-DERBY-1149-e4ba4e13
DERBY-1149: Fix failures in StatementTest.java. I also added a new constants file for testing that contains all the standard SQL State strings. Derby-specific SQL States can be added as needed. Passes derbyall, except for the sysinfo tests, which fail because of a known regression when running against the classes directory. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@390176 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/functionTests/util/SQLStateConstants.java", "hunks": [ { "added": [ "/*", " Derby - Class org.apache.derbyTesting.functionTests.util.SQLState", "", " Copyright 2006 The Apache Software Foundation or its licensors, as applicable.", " ", " Licensed under the Apache License, Version 2.0 (the \"License\");", " you may not use this file except in compliance with the License.", " You may obtain a copy of the License at", " ", " http =//www.apache.org/licenses/LICENSE-2.0", " ", " Unless required by applicable law or agreed to in writing, software", " distributed under the License is distributed on an \"AS IS\" BASIS,", " WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", " See the License for the specific language governing permissions and", " limitations under the License.", " ", "*/", "package org.apache.derbyTesting.functionTests.util;", " ", "/**", " * This contains constants for all the standard SQL states as well as", " * for those that are specific to Derby that our tests compare against", " * to make sure the right error is thrown.", " *", " * It is important to use these constants rather than those in", " * org.apache.derby.shared.common.reference.SQLState.java because", " * (a) that class is not part of the public API and (b) that class contains", " * message ids, not SQL states.", "*/", "public class SQLStateConstants", "{", " // ==== STANDARD SQL STATES =====", " // These are derived from the ISO SQL2003 specification", " // INCITS-ISO-IEC-9075-2-2003", " //", " public static final String AMBIGUOUS_CURSOR_NAME_NO_SUBCLASS ", " = \"3C000\";", " public static final String ATTEMPT_TO_ASSIGN_TO_NON_UPDATABLE_COLUMN_NO_SUBCLASS ", " = \"0U000\";", " public static final String ATTEMPT_TO_ASSIGN_TO_ORDERING_COLUMN_NO_SUBCLASS ", " = \"0V000\";", " public static final String CARDINALITY_VIOLATION_NO_SUBCLASS ", " = \"21000\";", " public static final String CLI_SPECIFIC_CONDITION_NO_SUBCLASS ", " = \"HY000\";", " public static final String CONNECTION_EXCEPTION_NO_SUBCLASS ", " = \"08000\";", " public static final String CONNECTION_EXCEPTION_CONNECTION_DOES_NOT_EXIST ", " = \"08003\";", " public static final String CONNECTION_EXCEPTION_CONNECTION_FAILURE ", " = \"08006\";", " public static final String CONNECTION_EXCEPTION_CONNECTION_NAME_IN_USE ", " = \"08002\";", " public static final String CONNECTION_EXCEPTION_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION ", " = \"08001\";", " public static final String CONNECTION_EXCEPTION_SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION ", " = \"08004\";", " public static final String CONNECTION_EXCEPTION_TRANSACTION_RESOLUTION_UNKNOWN ", " = \"08007\";", " public static final String DATA_EXCEPTION_NO_SUBCLASS ", " = \"22000\";", " public static final String DATA_EXCEPTION_ARRAY_ELEMENT_ERROR", " = \"2202E\";", " public static final String DATA_EXCEPTION_CHARACTER_NOT_IN_REPERTOIRE ", " = \"22021\";", " public static final String DATA_EXCEPTION_DATETIME_FIELD_OVERFLOW ", " = \"22008\";", " public static final String DATA_EXCEPTION_DIVISION_BY_ZERO ", " = \"22012\";", " public static final String DATA_EXCEPTION_ERROR_IN_ASSIGNMENT ", " = \"22005\";", " public static final String DATA_EXCEPTION_ESCAPE_CHARACTER_CONFLICT ", " = \"2200B\";", " public static final String DATA_EXCEPTION_INDICATOR_OVERFLOW ", " = \"22022\";", " public static final String DATA_EXCEPTION_INTERVAL_FIELD_OVERFLOW ", " = \"22015\";", " public static final String DATA_EXCEPTION_INTERVAL_VALUE_OUT_OF_RANGE ", " = 
\"2200P\";", " public static final String DATA_EXCEPTION_INVALID_ARGUMENT_FOR_NATURAL_LOGARITHM ", " = \"2201E\";", " public static final String DATA_EXCEPTION_INVALID_ARGUMENT_FOR_POWER_FUNCTION ", " = \"2201F\";", " public static final String DATA_EXCEPTION_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION ", " = \"2201G\";", " public static final String DATA_EXCEPTION_INVALID_CHARACTER_VALUE_FOR_CAST ", " = \"22018\";", " public static final String DATA_EXCEPTION_INVALID_DATETIME_FORMAT ", " = \"22007\";", " public static final String DATA_EXCEPTION_INVALID_ESCAPE_CHARACTER ", " = \"22019\";", " public static final String DATA_EXCEPTION_INVALID_ESCAPE_OCTET", " = \"2200D\";", " public static final String DATA_EXCEPTION_INVALID_ESCAPE_SEQUENCE ", " = \"22025\";", " public static final String DATA_EXCEPTION_INVALID_INDICATOR_PARAMETER_VALUE ", " = \"22010\";", " public static final String DATA_EXCEPTION_INVALID_INTERVAL_FORMAT ", " = \"22006\";", " public static final String DATA_EXCEPTION_INVALID_PARAMETER_VALUE ", " = \"22023\";", " public static final String DATA_EXCEPTION_INVALID_PRECEDING_OR_FOLLOWING_SIZE_IN_WINDOW_FUNCTION ", " = \"22013\";", " public static final String DATA_EXCEPTION_INVALID_REGULAR_EXPRESSION ", " = \"2201B\";", " public static final String DATA_EXCEPTION_INVALID_REPEAT_ARGUMENT_IN_A_SAMPLE_CLAUSE ", " = \"2202G\";", " public static final String DATA_EXCEPTION_INVALID_SAMPLE_SIZE ", " = \"2202H\";", " public static final String DATA_EXCEPTION_INVALID_TIME_ZONE_DISPLACEMENT_VALUE ", " = \"22009\";", " public static final String DATA_EXCEPTION_INVALID_USE_OF_ESCAPE_CHARACTER ", " = \"2200C\";", " public static final String DATA_EXCEPTION_NULL_VALUE_NO_INDICATOR_PARAMETER", " = \"2200G\";", " public static final String DATA_EXCEPTION_MOST_SPECIFIC_TYPE_MISMATCH ", " = \"22002\";", " public static final String DATA_EXCEPTION_MULTISET_VALUE_OVERFLOW ", " = \"2200Q\";", " public static final String DATA_EXCEPTION_NONCHARACTER_IN_UCS_STRING ", " = \"22029\";", " public static final String DATA_EXCEPTION_NULL_VALUE_NOT_ALLOWED ", " = \"22004\";", " public static final String DATA_EXCEPTION_NULL_VALUE_SUBSTITUTED_FOR_MUTATOR_SUBJECT_PARAMETER ", " = \"2202D\";", " public static final String DATA_EXCEPTION_NUMERIC_VALUE_OUT_OF_RANGE ", " = \"22003\";", " public static final String DATA_EXCEPTION_SEQUENCE_GENERATOR_LIMIT_EXCEEDED ", " = \"2200H\";", " public static final String DATA_EXCEPTION_STRING_DATA_LENGTH_MISMATCH ", " = \"22026\";", " public static final String DATA_EXCEPTION_STRING_DATA_RIGHT_TRUNCATION ", " = \"22001\";", " public static final String DATA_EXCEPTION_SUBSTRING_ERROR ", " = \"22011\";", " public static final String DATA_EXCEPTION_TRIM_ERROR ", " = \"22027\";", " public static final String DATA_EXCEPTION_UNTERMINATED_C_STRING ", " = \"22024\";", " public static final String DATA_EXCEPTION_ZERO_LENGTH_CHARACTER_STRING ", " = \"2200F\";", " public static final String DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST_NO_SUBCLASS ", " = \"2B000\";", " public static final String DIAGNOSTICS_EXCEPTION_NO_SUBCLASS ", " = \"0Z000\";", " public static final String DIAGNOSTICS_EXCEPTION_MAXIMUM_NUMBER_OF_DIAGNOSTICS_AREAS_EXCEEDED ", " = \"0Z001\";", " public static final String DYNAMIC_SQL_ERROR_NO_SUBCLASS ", " = \"07000\";", " public static final String DYNAMIC_SQL_ERROR_CURSOR_SPECIFICATION_CANNOT_BE_EXECUTED ", " = \"07003\";", " public static final String DYNAMIC_SQL_ERROR_INVALID_DATETIME_INTERVAL_CODE ", " = \"0700F\";", " public static final String 
DYNAMIC_SQL_ERROR_INVALID_DESCRIPTOR_COUNT ", " = \"07008\";", " public static final String DYNAMIC_SQL_ERROR_INVALID_DESCRIPTOR_INDEX ", " = \"07009\";", " public static final String DYNAMIC_SQL_ERROR_PREPARED_STATEMENT_NOT_A_CURSOR_SPECIFICATION ", " = \"07005\";", " public static final String DYNAMIC_SQL_ERROR_RESTRICTED_DATA_TYPE_ATTRIBUTE_VIOLATION ", " = \"07006\";", " public static final String DYNAMIC_SQL_ERROR_DATA_TYPE_TRANSFORM_FUNCTION_VIOLATION ", " = \"0700B\";", " public static final String DYNAMIC_SQL_ERROR_INVALID_DATA_TARGET ", " = \"0700D\";", " public static final String DYNAMIC_SQL_ERROR_INVALID_LEVEL_VALUE ", " = \"0700E\";", " public static final String DYNAMIC_SQL_ERROR_UNDEFINED_DATA_VALUE ", " = \"0700C\";", " public static final String DYNAMIC_SQL_ERROR_USING_CLAUSE_DOES_NOT_MATCH_DYNAMIC_PARAMETER_SPEC ", " = \"07001\";", " public static final String DYNAMIC_SQL_ERROR_USING_CLAUSE_DOES_NOT_MATCH_TARGET_SPEC ", " = \"07002\";", " public static final String DYNAMIC_SQL_ERROR_USING_CLAUSE_REQUIRED_FOR_DYNAMIC_PARAMETERS ", " = \"07004\";", " public static final String DYNAMIC_SQL_ERROR_USING_CLAUSE_REQUIRED_FOR_RESULT_FIELDS ", " = \"07007\";", " public static final String EXTERNAL_ROUTINE_EXCEPTION_NO_SUBCLASS ", " = \"38000\";", " public static final String EXTERNAL_ROUTINE_EXCEPTION_CONTAINING_SQL_NOT_PERMITTED ", " = \"38001\";", " public static final String EXTERNAL_ROUTINE_EXCEPTION_MODIFYING_SQL_DATA_NOT_PERMITTED ", " = \"38002\";", " public static final String EXTERNAL_ROUTINE_EXCEPTION_PROHIBITED_SQL_STATEMENT_ATTEMPTED ", " = \"38003\";", " public static final String EXTERNAL_ROUTINE_EXCEPTION_READING_SQL_DATA_NOT_PERMITTED ", " = \"38004\";", " public static final String EXTERNAL_ROUTINE_INVOCATION_EXCEPTION_NO_SUBCLASS ", " = \"39000\";", " public static final String EXTERNAL_ROUTINE_INVOCATION_EXCEPTION_NULL_VALUE_NOT_ALLOWED ", " = \"39004\";", " public static final String FEATURE_NOT_SUPPORTED_NO_SUBCLASS ", " = \"0A000\";", " public static final String FEATURE_NOT_SUPPORTED_MULTIPLE_ENVIRONMENT_TRANSACTIONS ", " = \"0A001\";", " public static final String INTEGRITY_CONSTRAINT_VIOLATION_NO_SUBCLASS ", " = \"23000\";", " public static final String INTEGRITY_CONSTRAINT_VIOLATION_RESTRICT_VIOLATION ", " = \"23001\";", " public static final String INVALID_AUTHORIZATION_SPECIFICATION_NO_SUBCLASS ", " = \"28000\";", " public static final String INVALID_CATALOG_NAME_NO_SUBCLASS ", " = \"3D000\";", " public static final String INVALID_CHARACTER_SET_NAME_NO_SUBCLASS ", " = \"2C000\";", " public static final String INVALID_COLLATION_NAME_NO_SUBCLASS ", " = \"2H000\";", " public static final String INVALID_CONDITION_NUMBER_NO_SUBCLASS ", " = \"35000\";", " public static final String INVALID_CONNECTION_NAME_NO_SUBCLASS ", " = \"2E000\";", " public static final String INVALID_CURSOR_NAME_NO_SUBCLASS ", " = \"34000\";", " public static final String INVALID_CURSOR_STATE_NO_SUBCLASS ", " = \"24000\";", " public static final String INVALID_GRANTOR_STATE_NO_SUBCLASS ", " = \"0L000\";", " public static final String INVALID_ROLE_SPECIFICATION ", " = \"0P000\";", " public static final String INVALID_SCHEMA_NAME_NO_SUBCLASS ", " = \"3F000\";", " public static final String INVALID_SCHEMA_NAME_LIST_SPECIFICATION_NO_SUBCLASS ", " = \"0E000\";", " public static final String INVALID_SQL_DESCRIPTOR_NAME_NO_SUBCLASS ", " = \"33000\";", " public static final String INVALID_SQL_INVOKED_PROCEDURE_REFERENCE_NO_SUBCLASS ", " = \"0M000\";", " public static final String 
INVALID_SQL_STATEMENT ", " = \"30000\";", " public static final String INVALID_SQL_STATEMENT_IDENTIFIER_NO_SUBCLASS ", " = \"30000\";", " public static final String INVALID_SQL_STATEMENT_NAME_NO_SUBCLASS ", " = \"26000\";", " public static final String INVALID_TRANSFORM_GROUP_NAME_SPECIFICATION_NO_SUBCLASS ", " = \"0S000\";", " public static final String INVALID_TRANSACTION_STATE_NO_SUBCLASS ", " = \"25000\";", " public static final String INVALID_TRANSACTION_STATE_ACTIVE_SQL_TRANSACTION ", " = \"25001\";", " public static final String INVALID_TRANSACTION_STATE_BRANCH_TRANSACTION_ALREADY_ACTIVE ", " = \"25002\";", " public static final String INVALID_TRANSACTION_STATE_HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL ", " = \"25008\";", " public static final String INVALID_TRANSACTION_STATE_INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION ", " = \"25003\";", " public static final String INVALID_TRANSACTION_STATE_INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION ", " = \"25004\";", " public static final String INVALID_TRANSACTION_STATE_NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION ", " = \"25005\";", " public static final String INVALID_TRANSACTION_STATE_READ_ONLY_SQL_TRANSACTION ", " = \"25006\";", " public static final String INVALID_TRANSACTION_STATE_SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED ", " = \"25007\";", " public static final String INVALID_TRANSACTION_INITIATION_NO_SUBCLASS ", " = \"0B000\";", " public static final String INVALID_TRANSACTION_TERMINATION_NO_SUBCLASS ", " = \"2D000\";", " public static final String LOCATOR_EXCEPTION_INVALID_SPECIFICATION ", " = \"0F001\";", " public static final String LOCATOR_EXCEPTION_NO_SUBCLASS ", " = \"0F000\";", " public static final String NO_DATA_NO_SUBCLASS ", " = \"02000\";", " public static final String NO_DATA_NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED ", " = \"02001\";", " public static final String REMOTE_DATABASE_ACCESS_NO_SUBCLASS ", " = \"HZ000\";", " public static final String SAVEPOINT_EXCEPTION_INVALID_SPECIFICATION ", " = \"3B001\";", " public static final String SAVEPOINT_EXCEPTION_NO_SUBCLASS ", " = \"3B000\";", " public static final String SAVEPOINT_EXCEPTION_TOO_MANY ", " = \"3B002\";", " public static final String SQL_ROUTINE_EXCEPTION_NO_SUBCLASS ", " = \"2F000\";", " public static final String SQL_ROUTINE_EXCEPTION_FUNCTION_EXECUTED_NO_RETURN_STATEMENT ", " = \"2F005\";", " public static final String SQL_ROUTINE_EXCEPTION_MODIFYING_SQL_DATA_NOT_PERMITTED ", " = \"2F002\";", " public static final String SQL_ROUTINE_EXCEPTION_PROHIBITED_SQL_STATEMENT_ATTEMPTED ", " = \"2F003\";", " public static final String SQL_ROUTINE_EXCEPTION_READING_SQL_DATA_NOT_PERMITTED ", " = \"2F004\";", " public static final String SUCCESSFUL_COMPLETION_NO_SUBCLASS ", " = \"00000\";", " public static final String SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION_NO_SUBCLASS ", " = \"42000\";", " public static final String SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION_IN_DIRECT_STATEMENT_NO_SUBCLASS ", " = \"2A000\";", " public static final String SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION_IN_DYNAMIC_STATEMENT_NO_SUBCLASS ", " = \"37000\";", " public static final String TARGET_TABLE_DISAGREES_WITH_CURSOR_SPECIFICATION_NO_SUBCLASS ", " = \"0T000\";", " public static final String TRANSACTION_ROLLBACK_NO_SUBCLASS ", " = \"40000\";", " public static final String TRANSACTION_ROLLBACK_INTEGRITY_CONSTRAINT_VIOLATION ", " = \"40002\";", " public static final String TRANSACTION_ROLLBACK_SERIALIZATION_FAILURE ", " = \"40001\";", " public static final String 
TRANSACTION_ROLLBACK_STATEMENT_COMPLETION_UNKNOWN ", " = \"40003\";", " public static final String TRIGGERED_DATA_CHANGE_VIOLATION_NO_SUBCLASS ", " = \"27000\";", " public static final String WARNING_NO_SUBCLASS ", " = \"01000\";", " public static final String WARNING_ADDITIONAL_RESULT_SETS_RETURNED ", " = \"0100D\";", " public static final String WARNING_ARRAY_DATA_RIGHT_TRUNCATION ", " = \"0102F\";", " public static final String WARNING_ATTEMPT_TO_RETURN_TOO_MANY_RESULT_SETS ", " = \"0100E\";", " public static final String WARNING_CURSOR_OPERATION_CONFLICT ", " = \"01001\";", " public static final String WARNING_DEFAULT_VALUE_TOO_LONG_FOR_INFORMATION_SCHEMA ", " = \"0100B\";", " public static final String WARNING_DISCONNECT_ERROR ", " = \"01002\";", " public static final String WARNING_DYNAMIC_RESULT_SETS_RETURNED ", " = \"0100C\";", " public static final String WARNING_INSUFFICIENT_ITEM_DESCRIPTOR_AREAS ", " = \"01005\";", " public static final String WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION ", " = \"01003\";", " public static final String WARNING_PRIVILEGE_NOT_GRANTED ", " = \"01007\";", " public static final String WARNING_PRIVILEGE_NOT_REVOKED ", " = \"01006\";", " public static final String WARNING_QUERY_EXPRESSION_TOO_LONG_FOR_INFORMATION_SCHEMA ", " = \"0100A\";", " public static final String WARNING_SEARCH_CONDITION_TOO_LONG_FOR_INFORMATION_SCHEMA ", " = \"01009\";", " public static final String WARNING_STATEMENT_TOO_LONG_FOR_INFORMATION_SCHEMA ", " = \"0100F\";", " public static final String WARNING_STRING_DATA_RIGHT_TRUNCATION_WARNING ", " = \"01004\";", " public static final String WITH_CHECK_OPTION_VIOLATION_NO_SUBCLASS ", " = \"44000\";", "}" ], "header": "@@ -0,0 +1,339 @@", "removed": [] } ] } ]
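A minimal sketch (not part of the commit) of the intended usage described in the commit message: a test forces an error and compares its SQLState against a constant from the new SQLStateConstants class instead of a hard-coded string. The connection handling and the SQL statement are illustrative assumptions.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import org.apache.derbyTesting.functionTests.util.SQLStateConstants;

// Illustrative check: force a division-by-zero error and compare its SQLState
// against the standard constant rather than a hard-coded "22012" literal.
public class DivideByZeroCheck {
    static void check(Connection conn) throws SQLException {
        Statement s = conn.createStatement();
        try {
            ResultSet rs = s.executeQuery("VALUES 1/0");
            while (rs.next()) {
                rs.getInt(1);   // the error may surface at execute or at fetch time
            }
        } catch (SQLException e) {
            if (!SQLStateConstants.DATA_EXCEPTION_DIVISION_BY_ZERO
                    .equals(e.getSQLState())) {
                throw e;        // some other error than the one we expected
            }
        } finally {
            s.close();
        }
    }
}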
derby-DERBY-1156-8a62d60d
DERBY-1156 (partial): re-encrypting an encrypted database. This patch adds some of the code required to support reconfiguring (re-encrypting) an already existing encrypted database with a new password (secret key) or an external user-specified encryption key. Two new attributes, "newBootPassword" and "newEncryptionkey", are introduced to support this functionality. - Modified the code to allow two instances of the cipher factory to exist, so that the existing data can be decrypted with the old encryption key using one cipher factory and rewritten with the new encryption keys using the other. - Re-encryption of the database with new keys is similar to encrypting an already existing database: all the container data is read through the page cache and rewritten using the new encryption keys. - Added a test case for re-encryption of an encrypted database. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@416536 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/reference/Attribute.java", "hunks": [ { "added": [ " /**", "\t The attribute that is used to chage the secret key of an encrypted", " database. The secret key must be at least 8 characters long.", "\t\tThis key must not be stored persistently in cleartext anywhere. ", "\t */", "", "\tString NEW_BOOT_PASSWORD = \"newBootPassword\";", "", "", "" ], "header": "@@ -92,6 +92,16 @@ public interface Attribute {", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/services/jce/JCECipherFactory.java", "hunks": [ { "added": [], "header": "@@ -23,7 +23,6 @@ package org.apache.derby.impl.services.jce;", "removed": [ "import org.apache.derby.iapi.services.monitor.ModuleControl;" ] }, { "added": [ "public final class JCECipherFactory implements CipherFactory, java.security.PrivilegedExceptionAction" ], "header": "@@ -66,7 +65,7 @@ import org.apache.derby.io.StorageRandomAccessFile;", "removed": [ "public final class JCECipherFactory implements CipherFactory, ModuleControl, java.security.PrivilegedExceptionAction" ] }, { "added": [ "", " /*", " * Constructor of JCECipherFactory, initializes the new instances.", " *", " * @param create true, if the database is getting configured ", " * for encryption.", " * @param props\t encryption properties/attributes to use", " * for creating the cipher factory.", " * @param newAttrs true, if cipher factory has to be created using ", " * should using the new attributes specified by the user. ", " * For example to reencrypt the database with ", " * a new password.", " */", " public JCECipherFactory(boolean create, ", " Properties props,", " boolean newAttributes) ", " throws StandardException", " {", " init(create, props, newAttributes);", " }", " ", "", "" ], "header": "@@ -113,6 +112,29 @@ public final class JCECipherFactory implements CipherFactory, ModuleControl, jav", "removed": [] }, { "added": [ " /*", " * Initilize the new instance of this class. ", " */", "\tpublic void\tinit(boolean create, Properties properties, boolean newAttrs)" ], "header": "@@ -373,11 +395,11 @@ public final class JCECipherFactory implements CipherFactory, ModuleControl, jav", "removed": [ "\t/*", "\t * module control methods", "\t */", "\tpublic void\tboot(boolean create, Properties properties)" ] }, { "added": [ " // get the external key specified by the user to ", " // encrypt the database. If user is reencrypting the", " // database with a new encryption key, read the value of ", " // the new encryption key. ", " String externalKey = properties.getProperty((newAttrs ? ", " Attribute.NEW_CRYPTO_EXTERNAL_KEY:", " Attribute.CRYPTO_EXTERNAL_KEY));" ], "header": "@@ -385,7 +407,13 @@ public final class JCECipherFactory implements CipherFactory, ModuleControl, jav", "removed": [ "\t\tString externalKey = properties.getProperty(Attribute.CRYPTO_EXTERNAL_KEY);" ] }, { "added": [ "\t\t\t\tif (properties.getProperty((newAttrs ? 
", " Attribute.NEW_BOOT_PASSWORD :", " Attribute.BOOT_PASSWORD)) != null)", "\t\t\t\tgeneratedKey = ", " org.apache.derby.iapi.util.StringUtil.fromHexString(externalKey, ", " 0, ", " externalKey.length());" ], "header": "@@ -549,10 +577,15 @@ public final class JCECipherFactory implements CipherFactory, ModuleControl, jav", "removed": [ "\t\t\t\tif (properties.getProperty(Attribute.BOOT_PASSWORD) != null)", "\t\t\t\tgeneratedKey = org.apache.derby.iapi.util.StringUtil.fromHexString(externalKey, 0, externalKey.length());" ] }, { "added": [ "\t\t\t\tgeneratedKey = handleBootPassword(create, properties, newAttrs);", "\t\t\t\tif(create || newAttrs)" ], "header": "@@ -564,8 +597,8 @@ public final class JCECipherFactory implements CipherFactory, ModuleControl, jav", "removed": [ "\t\t\t\tgeneratedKey = handleBootPassword(create, properties);", "\t\t\t\tif(create)" ] }, { "added": [ "", "\tprivate byte[] handleBootPassword(boolean create, ", " Properties properties, ", " boolean newPasswd)", "", " // get the key specifed by the user. If user is reencrypting the", " // database; read the value of the new password. ", "\t\tString inputKey = properties.getProperty((newPasswd ? ", " Attribute.NEW_BOOT_PASSWORD : ", " Attribute.BOOT_PASSWORD));" ], "header": "@@ -608,10 +641,18 @@ public final class JCECipherFactory implements CipherFactory, ModuleControl, jav", "removed": [ "\tprivate byte[] handleBootPassword(boolean create, Properties properties)", "\t\tString inputKey = properties.getProperty(Attribute.BOOT_PASSWORD);" ] }, { "added": [ "\t\tif (create || newPasswd)" ], "header": "@@ -638,7 +679,7 @@ public final class JCECipherFactory implements CipherFactory, ModuleControl, jav", "removed": [ "\t\tif (create)" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/RawStore.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.services.crypto.CipherFactoryBuilder;" ], "header": "@@ -24,6 +24,7 @@ import org.apache.derby.iapi.services.daemon.DaemonFactory;", "removed": [] }, { "added": [ "\tprivate CipherFactory currentCipherFactory;" ], "header": "@@ -113,7 +114,7 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ "\tprivate CipherFactory cipherFactory;" ] }, { "added": [ " boolean reEncrypt = false;", " CipherFactory newCipherFactory = null;" ], "header": "@@ -179,6 +180,8 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [] }, { "added": [ "" ], "header": "@@ -202,6 +205,7 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [] }, { "added": [ " } else {", " // check if the user has requested to renecrypt an", " // encrypted datbase with new encryption password/key.", " if (encryptedDatabase) {", " if (properties.getProperty(", " Attribute.NEW_BOOT_PASSWORD) != null) {", " reEncrypt = true;", " }", " else if (properties.getProperty(", " Attribute.NEW_CRYPTO_EXTERNAL_KEY) != null){", " reEncrypt = true;", " };", " encryptDatabase = reEncrypt;", " }", "", "" ], "header": "@@ -228,11 +232,27 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [] }, { "added": [ " // Incase of re-encryption of an already of encrypted database", " // only some information needs to updated; it is not treated ", " // like the configuring the database for encryption first time. 
", " boolean setupEncryption = create || (encryptDatabase && !reEncrypt);", " // start the cipher factory module, that is is used to create ", " // instances of the cipher factory with specific enctyption ", " // properties. ", "", " CipherFactoryBuilder cb = (CipherFactoryBuilder)", " Monitor.startSystemModule(org.apache.derby.iapi.reference.Module.CipherFactoryBuilder);", "", " // create instance of the cipher factory with the ", " // specified encryption properties. ", " currentCipherFactory = cb.createCipherFactory(setupEncryption, ", " properties, ", " false);", " // connection url. For security reasons, this key is not made persistent" ], "header": "@@ -241,14 +261,26 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ " boolean setupEncryption = create || encryptDatabase; ", " cipherFactory =", " (CipherFactory)Monitor.bootServiceModule(setupEncryption, this,", "\t\t\t\t\t\torg.apache.derby.iapi.reference.Module.CipherFactory, properties);", " // connection url. For security reasons, this key is not made persistent" ] }, { "added": [ " currentCipherFactory.verifyKey(setupEncryption, storageFactory, properties);", " encryptionEngine = currentCipherFactory." ], "header": "@@ -259,10 +291,10 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ " cipherFactory.verifyKey(setupEncryption,storageFactory,properties);", " encryptionEngine = cipherFactory." ] }, { "added": [ " decryptionEngine = currentCipherFactory.", " random = currentCipherFactory.getSecureRandom();", "", " if (reEncrypt) {", " // create new cipher factory with the new encrytpion", " // properties specified by the user. This cipher factory", " // is used to create the new encryption/decryption", " // engines to reencrypt the database with the new", " // encryption keys. 
", " newCipherFactory = ", " cb.createCipherFactory(setupEncryption, ", " properties, ", " true);", " newDecryptionEngine = ", " newCipherFactory.createNewCipher(CipherFactory.DECRYPT);", " newEncryptionEngine = ", " newCipherFactory.createNewCipher(CipherFactory.ENCRYPT);", " } else {", " // there is only one engine when configuring an ", " // unencrypted database for encryption ", " newDecryptionEngine = decryptionEngine;", " newEncryptionEngine = encryptionEngine;", "", " }", " currentCipherFactory.saveProperties(properties) ;" ], "header": "@@ -295,23 +327,40 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ " decryptionEngine = cipherFactory.", " random = cipherFactory.getSecureRandom();", " // for now there is only one encryption engine, ", " // configuring an unencrypted database for encryption ", " // is supported at this moment.", " newDecryptionEngine = decryptionEngine; ", " newEncryptionEngine = encryptionEngine;", " cipherFactory.saveProperties(properties) ;" ] }, { "added": [ " configureDatabaseForEncryption(properties, ", " reEncrypt, ", " newCipherFactory);" ], "header": "@@ -412,7 +461,9 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ " configureDatabaseForEncryption(properties);" ] }, { "added": [ " ", "" ], "header": "@@ -1149,6 +1200,8 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [] }, { "added": [ "\t\tif ((databaseEncrypted == false && encryptDatabase == false) || ", " (encryptionEngine == null && newEncryptionEngine == null))" ], "header": "@@ -1161,8 +1214,8 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ "\t\tif (databaseEncrypted == false && encryptDatabase == false || ", " encryptionEngine == null && newEncryptionEngine == null)" ] }, { "added": [ "\t\treturn currentCipherFactory.changeBootPassword((String)changePassword, properties, encryptionEngine);" ], "header": "@@ -1235,7 +1288,7 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ "\t\treturn cipherFactory.changeBootPassword((String)changePassword, properties, encryptionEngine);" ] }, { "added": [ " public void configureDatabaseForEncryption(Properties properties,", " boolean reEncrypt, ", " CipherFactory newCipherFactory) " ], "header": "@@ -1265,7 +1318,9 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ " public void configureDatabaseForEncryption(Properties properties) " ] } ] }, { "file": "java/testing/org/apache/derbyTesting/unitTests/crypto/T_Cipher.java", "hunks": [ { "added": [ " " ], "header": "@@ -79,7 +79,7 @@ public class T_Cipher extends T_Generic", "removed": [ "" ] }, { "added": [ "\t\treturn org.apache.derby.iapi.reference.Module.CipherFactoryBuilder;" ], "header": "@@ -90,7 +90,7 @@ public class T_Cipher extends T_Generic", "removed": [ "\t\treturn org.apache.derby.iapi.reference.Module.CipherFactory;" ] }, { "added": [ " CipherFactoryBuilder cb = (CipherFactoryBuilder)", " Monitor.startSystemModule(org.apache.derby.iapi.reference.Module.CipherFactoryBuilder);", "", " factory = cb.createCipherFactory(true, props, false);", "" ], "header": "@@ -212,8 +212,11 @@ public class T_Cipher extends T_Generic", "removed": [ "\t\tfactory = (CipherFactory)Monitor.bootServiceModule(true, (Object)null,", " org.apache.derby.iapi.reference.Module.CipherFactory, props);" ] } ] } ]
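A minimal usage sketch of the newBootPassword attribute this patch introduces, assuming an embedded database that is already encrypted with a boot password; the database name and both passwords are placeholders, not taken from the commit.

import java.sql.Connection;
import java.sql.DriverManager;

// Illustrative re-encryption boot: the database is assumed to be encrypted
// already with bootPassword=oldPassword12; the new key must be at least 8 characters.
public class ReEncryptSketch {
    public static void main(String[] args) throws Exception {
        Class.forName("org.apache.derby.jdbc.EmbeddedDriver");
        String url = "jdbc:derby:sampleEncryptedDB"
                + ";bootPassword=oldPassword12"        // key currently protecting the database
                + ";newBootPassword=newPassword34";    // new key to re-encrypt the data with
        Connection conn = DriverManager.getConnection(url);
        conn.close();   // the store rewrites containers under the new key during this boot
    }
}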
derby-DERBY-1156-ae71c745
DERBY-1156: Committing this for Suresh, reviewed by Mike Matrigali. Here are Mike's review comments: I have reviewed this patch and run a full set of tests on Windows XP / Sun JDK 1.4.2, which passed. I think this patch should be committed as is. It is a good incremental checkin for this feature. It provides a set of tools for the rest of the project, and gets the code to the point that it can encrypt an existing unencrypted db. Future checkins should address a couple of things: 1) obviously more testing. Some quick notes: the current tests may want to show that accessing the newly encrypted db with a bad password does not work, just to ensure the URL attributes are not simply being ignored. Also want to test abort - probably need a testing codepoint to cause the failure at the right time. 2) fix up the comments; there are some typos. 3) usual nit - some greater-than-80-character lines. I would have committed this change now, but svn still is not back. Patch submitted by Suresh Thalamati (suresh.thalamati@gmail.com) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@407366 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/services/io/StoredFormatIds.java", "hunks": [ { "added": [ " \t\t/* org.apache.derby.impl.store.raw.data.EncryptContainerOperation */", " public static final int LOGOP_ENCRYPT_CONTAINER =", " (MIN_ID_2 + 459);", "", " \t\t/* org.apache.derby.impl.store.raw.data.EncryptContainerUndoOperation */", " public static final int LOGOP_ENCRYPT_CONTAINER_UNDO =", " (MIN_ID_2 + 460);" ], "header": "@@ -1707,6 +1707,13 @@ public interface StoredFormatIds {", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/store/raw/data/DataFactory.java", "hunks": [ { "added": [ "\t\t\t\t\t byte[] ciphertext, int outputOffset, ", " boolean newEngine)" ], "header": "@@ -269,7 +269,8 @@ public interface DataFactory extends Corruptable {", "removed": [ "\t\t\t\t\t byte[] ciphertext, int outputOffset)" ] } ] }, { "file": "java/engine/org/apache/derby/impl/services/jce/JCECipherFactory.java", "hunks": [ { "added": [ "import java.util.Enumeration;" ], "header": "@@ -37,7 +37,7 @@ import org.apache.derby.iapi.reference.Attribute;", "removed": [ "" ] }, { "added": [ " // properties that needs to be stored in the", " // in the service.properties file.", " private Properties persistentProperties;", "", "" ], "header": "@@ -99,6 +99,11 @@ public final class JCECipherFactory implements CipherFactory, ModuleControl, jav", "removed": [] }, { "added": [ " persistentProperties = new Properties();" ], "header": "@@ -378,6 +383,7 @@ public final class JCECipherFactory implements CipherFactory, ModuleControl, jav", "removed": [] }, { "added": [ "\t\t\tpersistentProperties.put(Attribute.CRYPTO_ALGORITHM, ", " cryptoAlgorithm);" ], "header": "@@ -435,7 +441,8 @@ public final class JCECipherFactory implements CipherFactory, ModuleControl, jav", "removed": [ "\t\t\tproperties.put(Attribute.CRYPTO_ALGORITHM, cryptoAlgorithm);" ] }, { "added": [ "\t\t\t\t persistentProperties.put(Attribute.CRYPTO_KEY_LENGTH,", " keyLengthBits+\"-\"+generatedKey.length);" ], "header": "@@ -559,7 +566,8 @@ public final class JCECipherFactory implements CipherFactory, ModuleControl, jav", "removed": [ "\t\t\t\t properties.put(Attribute.CRYPTO_KEY_LENGTH,keyLengthBits+\"-\"+generatedKey.length);" ] }, { "added": [ "\t\t\t\tpersistentProperties.put(Attribute.DATA_ENCRYPTION, \"true\");", "\t\t\t\tpersistentProperties.put(RawStoreFactory.DATA_ENCRYPT_ALGORITHM_VERSION,", " String.valueOf(1));", "\t\t\t\tpersistentProperties.put(RawStoreFactory.LOG_ENCRYPT_ALGORITHM_VERSION,", " String.valueOf(1));" ], "header": "@@ -568,12 +576,14 @@ public final class JCECipherFactory implements CipherFactory, ModuleControl, jav", "removed": [ "\t\t\t\tproperties.put(Attribute.DATA_ENCRYPTION, \"true\");", "\t\t\t\tproperties.put(RawStoreFactory.DATA_ENCRYPT_ALGORITHM_VERSION,String.valueOf(1));", "\t\t\t\tproperties.put(RawStoreFactory.LOG_ENCRYPT_ALGORITHM_VERSION,String.valueOf(1));" ] }, { "added": [ "\t\t\tpersistentProperties.put(RawStoreFactory.ENCRYPTED_KEY, ", " saveSecretKey(generatedKey, bootPassword));" ], "header": "@@ -633,7 +643,8 @@ public final class JCECipherFactory implements CipherFactory, ModuleControl, jav", "removed": [ "\t\t\tproperties.put(RawStoreFactory.ENCRYPTED_KEY, saveSecretKey(generatedKey, bootPassword));" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/RawStore.java", "hunks": [ { "added": [ " private boolean encryptDatabase;", " private CipherProvider newEncryptionEngine;", "\tprivate CipherProvider newDecryptionEngine;" ], "header": "@@ -108,8 +108,11 @@ public 
final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [] }, { "added": [], "header": "@@ -163,7 +166,6 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ "" ] }, { "added": [ " String restoreFromBackup = null;", "", " // check if this is a restore from a backup copy. ", " restoreFromBackup = properties.getProperty(Attribute.CREATE_FROM);", " if(restoreFromBackup == null)", " restoreFromBackup = properties.getProperty(Attribute.RESTORE_FROM);", " if(restoreFromBackup == null)", " restoreFromBackup =", " properties.getProperty(Attribute.ROLL_FORWARD_RECOVERY_FROM);", "", " ", " // check if user has requested to encrypt the database or it is an", " // encrypted database.", "", " String dataEncryption = ", " properties.getProperty(Attribute.DATA_ENCRYPTION);", " databaseEncrypted = Boolean.valueOf(dataEncryption).booleanValue(); ", "", " if (!create && restoreFromBackup == null) {", " // check if database is already encrypted, by directly peeking at the", " // database service propertes instead of the properties passed ", " // to this method. By looking at properties to the boot method ,", " // one can not differentiate if user is requesting for database", " // encryption or the database is already encrypted because ", " // Attribute.DATA_ENCRYPTION is used to store in the ", " // service properties to indicate that database", " // is encrypted and also users can specify it as URL attribute ", " // to encrypt and existing database. ", " ", " String name = Monitor.getMonitor().getServiceName(this);", " PersistentService ps = Monitor.getMonitor().getServiceType(this);", " String canonicalName = ps.getCanonicalServiceName(name);", " Properties serviceprops = ps.getServiceProperties(canonicalName, ", " (Properties)null);", " dataEncryption = serviceprops.getProperty(Attribute.DATA_ENCRYPTION);", " boolean encryptedDatabase = Boolean.valueOf(dataEncryption).booleanValue();", "", " if (!encryptedDatabase && databaseEncrypted) {", " // it it not an encrypted database, user is asking to ", " // encrypt an un-encrypted database. ", " encryptDatabase = true;", " // set database as un-encrypted, we will set it as encrypted ", " // after encrypting the existing data. ", " databaseEncrypted = false;", " }", " ", " // NOTE: if user specifies Attribute.DATA_ENCRYPTION on the", " // connection URL by mistake on an already encrypted database, ", " // it is ignored.", " }", " // setup encryption engines. ", "\t\t\tif (databaseEncrypted || encryptDatabase)", " // check if database is configured for encryption, during", " // configuration some of the properties database; so that", " // user does not have to specify them on the URL everytime.", " boolean setupEncryption = create || encryptDatabase; ", " cipherFactory =", " (CipherFactory)Monitor.bootServiceModule(setupEncryption, this,", "\t\t\t\t\t\torg.apache.derby.iapi.reference.Module.CipherFactory, properties);", " // The database can be encrypted using an encryption key that is given at", " // connection url. For security reasons, this key is not made persistent", " // in the database. But it is necessary to verify the encryption key ", " // whenever booting the database if it is similar to the key that was used", " // during creation time. 
This needs to happen before we access the data/logs to ", " // avoid the risk of corrupting the database because of a wrong encryption key.", " ", " // Please note this verification process does not provide any added security", " // but is intended to allow to fail gracefully if a wrong encryption key ", " // is used during boot time", " cipherFactory.verifyKey(setupEncryption,storageFactory,properties);", " // Initializes the encryption and decryption engines", " encryptionEngine = cipherFactory.", " createNewCipher(CipherFactory.ENCRYPT);", " ", " // At creation time of an encrypted database, store the encryption block size", " // for the algorithm. Store this value as property given by ", " // RawStoreFactory.ENCRYPTION_BLOCKSIZE. This value", " // is made persistent by storing it in service.properties", " // To connect to an existing database, retrieve the value and use it for", " // appropriate padding.", " // The default value of encryption block size is 8,", " // to allow for downgrade issues", " // Before support for AES (beetle6023), default encryption block size supported", " // was 8", "", " if(setupEncryption) ", " {", " encryptionBlockSize = encryptionEngine.getEncryptionBlockSize();", " // in case of database create, store the encryption block", " // size. Incase of reconfiguring the existing datbase, this", " // will be saved after encrypting the exisiting data. ", " if (create)", " properties.put(RawStoreFactory.ENCRYPTION_BLOCKSIZE,", " String.valueOf(encryptionBlockSize));", " }", " else", " {", " if(properties.getProperty(RawStoreFactory.ENCRYPTION_BLOCKSIZE) != null)", " encryptionBlockSize = Integer.parseInt(properties.getProperty", " (RawStoreFactory.ENCRYPTION_BLOCKSIZE));", " else", " encryptionBlockSize = encryptionEngine.getEncryptionBlockSize();", " } ", "", " decryptionEngine = cipherFactory.", " createNewCipher(CipherFactory.DECRYPT);", "", " random = cipherFactory.getSecureRandom();", " ", " if (encryptDatabase) {", " // for now there is only one encryption engine, ", " // configuring an unencrypted database for encryption ", " // is supported at this moment.", " newDecryptionEngine = decryptionEngine; ", " newEncryptionEngine = encryptionEngine;", " }", " // save the encryption properties if encryption is enabled ", " // at database creation time. ", " if(create)", " cipherFactory.saveProperties(properties) ;" ], "header": "@@ -176,68 +178,140 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ "\t\t\tString dataEncryption = properties.getProperty(Attribute.DATA_ENCRYPTION);", "\t\t\tdatabaseEncrypted = Boolean.valueOf(dataEncryption).booleanValue();", "", "\t\t\tif (databaseEncrypted)", "\t\t\t\t\tcipherFactory =", " (CipherFactory)Monitor.bootServiceModule(create, this,", "\t\t\t\t\t\torg.apache.derby.iapi.reference.Module.CipherFactory, properties);", "\t\t\t\t\t// The database can be encrypted using an encryption key that is given at", "\t\t\t\t\t// connection url. For security reasons, this key is not made persistent", "\t\t\t\t\t// in the database. But it is necessary to verify the encryption key ", "\t\t\t\t\t// whenever booting the database if it is similar to the key that was used", "\t\t\t\t\t// during creation time. 
This needs to happen before we access the data/logs to ", "\t\t\t\t\t// avoid the risk of corrupting the database because of a wrong encryption key.", "\t\t\t\t\t// Please note this verification process does not provide any added security", "\t\t\t\t // but is intended to allow to fail gracefully if a wrong encryption key ", "\t\t\t\t\t// is used during boot time", "\t\t\t\t\tcipherFactory.verifyKey(create,storageFactory,properties);", "", "\t\t\t\t\t// Initializes the encryption and decryption engines", "\t\t\t\t\tencryptionEngine = cipherFactory.", "\t\t\t\t\t\tcreateNewCipher(CipherFactory.ENCRYPT);", "", "\t // At creation time of an encrypted database, store the encryption block size", "\t\t\t\t\t// for the algorithm. Store this value as property given by ", "\t // RawStoreFactory.ENCRYPTION_BLOCKSIZE. This value", "\t // is made persistent by storing it in service.properties", "\t // To connect to an existing database, retrieve the value and use it for", "\t // appropriate padding.", "\t // The default value of encryption block size is 8,", "\t\t\t\t\t// to allow for downgrade issues", "\t\t\t\t\t// Before support for AES (beetle6023), default encryption block size supported", "\t\t\t\t\t// was 8", "", "\t\t\t\t\tif(create)", "\t\t\t\t\t{", "\t\t\t\t\t\tencryptionBlockSize = encryptionEngine.getEncryptionBlockSize();", "\t\t\t\t\t\tproperties.put(RawStoreFactory.ENCRYPTION_BLOCKSIZE,", "\t\t\t\t\t\t\t\tString.valueOf(encryptionBlockSize));", "\t\t\t\t\t}", "\t\t\t\t\telse", "\t\t\t\t\t{", "\t\t\t\t\t\tif(properties.getProperty(RawStoreFactory.ENCRYPTION_BLOCKSIZE) != null)", "\t\t\t\t\t\t encryptionBlockSize = Integer.parseInt(properties.getProperty", "\t\t\t\t\t\t\t\t\t\t(RawStoreFactory.ENCRYPTION_BLOCKSIZE));", "\t\t\t\t\t}", "\t\t\t\t\tdecryptionEngine = cipherFactory.", "\t\t\t\t\t\tcreateNewCipher(CipherFactory.DECRYPT);", "\t\t\t\t\trandom = cipherFactory.getSecureRandom();" ] }, { "added": [ " if (databaseEncrypted) {", " // let log factory know if the database is encrypted . ", " logFactory.setDatabaseEncrypted();", " // let data factory know if the database is encrypted. ", " dataFactory.setDatabaseEncrypted();", " }" ], "header": "@@ -259,13 +333,12 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ "\t\tString restoreFromBackup =null;", "\t\trestoreFromBackup = properties.getProperty(Attribute.CREATE_FROM);", "\t\tif(restoreFromBackup == null)", "\t\t\trestoreFromBackup = properties.getProperty(Attribute.RESTORE_FROM);", "\t\tif(restoreFromBackup == null)", "\t\t\trestoreFromBackup =", "\t\t\t\tproperties.getProperty(Attribute.ROLL_FORWARD_RECOVERY_FROM);" ] }, { "added": [ "" ], "header": "@@ -320,6 +393,7 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [] }, { "added": [ " // if user requested to encrpty an unecrypted database or encrypt with", " // new alogorithm then do that now. 
", " if (encryptDatabase) {", " configureDatabaseForEncryption(properties);", " }" ], "header": "@@ -335,6 +409,11 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [] }, { "added": [ "" ], "header": "@@ -1069,6 +1148,7 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [] }, { "added": [ "\t\t\t\t\t byte[] ciphertext, int outputOffset, ", " boolean newEngine)", "\t\tif (databaseEncrypted == false && encryptDatabase == false || ", " encryptionEngine == null && newEncryptionEngine == null)" ], "header": "@@ -1077,10 +1157,12 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ "\t\t\t\t\t byte[] ciphertext, int outputOffset)", "\t\tif (databaseEncrypted == false || encryptionEngine == null)" ] }, { "added": [ " if (newEngine) {", " return newEncryptionEngine.encrypt(cleartext, offset, length,", " ciphertext, outputOffset);", " } else {", " return encryptionEngine.encrypt(cleartext, offset, length,", " ciphertext, outputOffset);", " }" ], "header": "@@ -1088,8 +1170,13 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ "\t\treturn encryptionEngine.encrypt(cleartext, offset, length,", "\t\t\t\t\t\t\t\t\t\tciphertext, outputOffset);" ] }, { "added": [ "\t\t\t\t\t byte[] cleartext, int outputOffset) " ], "header": "@@ -1100,7 +1187,7 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ "\t\t\t\t\t byte[] cleartext, int outputOffset)" ] }, { "added": [ " return decryptionEngine.decrypt(ciphertext, offset, length,", " cleartext, outputOffset);" ], "header": "@@ -1110,9 +1197,8 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ "", "\t\treturn decryptionEngine.decrypt(ciphertext, offset, length,", "\t\t\t\t\t\t\t\t\t\tcleartext, outputOffset);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/data/BaseDataFileFactory.java", "hunks": [ { "added": [], "header": "@@ -324,12 +324,6 @@ public final class BaseDataFileFactory", "removed": [ "\t\t// restoreFrom and createFrom operations also need to know if database ", " // is encrypted", "\t\tString dataEncryption = ", " startParams.getProperty(Attribute.DATA_ENCRYPTION);", "", "\t\tdatabaseEncrypted = Boolean.valueOf(dataEncryption).booleanValue();" ] }, { "added": [ " // restoreFrom and createFrom operations also need to know if database ", " // is encrypted", " String dataEncryption = ", " startParams.getProperty(Attribute.DATA_ENCRYPTION);", " databaseEncrypted = Boolean.valueOf(dataEncryption).booleanValue();" ], "header": "@@ -344,6 +338,11 @@ public final class BaseDataFileFactory", "removed": [] }, { "added": [ " public void setDatabaseEncrypted()", "\t{", "\t\tdatabaseEncrypted = true;", "\t}", "", " int outputOffset,", " boolean newEngine)", " cleartext, offset, length, ", " ciphertext, outputOffset, ", " newEngine);" ], "header": "@@ -2102,16 +2101,24 @@ public final class BaseDataFileFactory", "removed": [ " int outputOffset)", " cleartext, offset, length, ciphertext, outputOffset);" ] }, { "added": [ " public void encryptAllContainers(RawTransaction t) throws StandardException", " {", " EncryptData ed = new EncryptData(this);", " // encrypt all the conatiners in the databse", " ed.encryptAllContainers(t);", " }", "" ], "header": "@@ -2126,6 +2133,13 @@ public final class BaseDataFileFactory", "removed": [] } ] }, { "file": 
"java/engine/org/apache/derby/impl/store/raw/data/FileContainer.java", "hunks": [ { "added": [ "\t * Get a latched page. Incase of backup page Latch is necessary to " ], "header": "@@ -2983,7 +2983,7 @@ abstract class FileContainer", "removed": [ "\t * Get a latched page to write to the backup. Page Latch is necessary to " ] }, { "added": [ "\tprotected BasePage getLatchedPage(BaseContainerHandle handle, " ], "header": "@@ -2994,7 +2994,7 @@ abstract class FileContainer", "removed": [ "\tprotected BasePage getPageForBackup(BaseContainerHandle handle, " ] }, { "added": [ " byte[] encryptionBuffer,", " boolean newEngine)" ], "header": "@@ -3173,7 +3173,8 @@ abstract class FileContainer", "removed": [ " byte[] encryptionBuffer)" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/data/RAFContainer.java", "hunks": [ { "added": [ " encryptionBuf, ", " false);" ], "header": "@@ -394,7 +394,8 @@ class RAFContainer extends FileContainer implements PrivilegedExceptionAction", "removed": [ " encryptionBuf);" ] }, { "added": [ " byte[] encryptionBuf, ", " boolean encryptWithNewEngine) " ], "header": "@@ -466,7 +467,8 @@ class RAFContainer extends FileContainer implements PrivilegedExceptionAction", "removed": [ " byte[] encryptionBuf) " ] }, { "added": [ " if (dataFactory.databaseEncrypted() || encryptWithNewEngine) ", " return encryptPage(pageData, ", " pageSize, ", " encryptionBuf, ", " encryptWithNewEngine);" ], "header": "@@ -489,9 +491,12 @@ class RAFContainer extends FileContainer implements PrivilegedExceptionAction", "removed": [ " if (dataFactory.databaseEncrypted()) ", " return encryptPage(pageData, pageSize, encryptionBuf);" ] }, { "added": [ " page = getLatchedPage(handle, pageNumber);" ], "header": "@@ -1159,7 +1164,7 @@ class RAFContainer extends FileContainer implements PrivilegedExceptionAction", "removed": [ " page = getPageForBackup(handle, pageNumber);" ] }, { "added": [ " encryptionBuf, false);" ], "header": "@@ -1167,7 +1172,7 @@ class RAFContainer extends FileContainer implements PrivilegedExceptionAction", "removed": [ " encryptionBuf);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/log/LogToFile.java", "hunks": [ { "added": [], "header": "@@ -2876,11 +2876,6 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [ "\t\tString dataEncryption = ", " startParams.getProperty(Attribute.DATA_ENCRYPTION);", "", "\t\tdatabaseEncrypted = Boolean.valueOf(dataEncryption).booleanValue();", "" ] }, { "added": [ "", " /*", " * Set that the database is encrypted, all the data in the ", " * transaction log should be encrypted. ", " */", " public void setDatabaseEncrypted() ", " {", " databaseEncrypted = true;", " }", "", " /*", " * setup log for encryption. ", " */", " public void setupLogEncryption() throws StandardException", " {", " // switch the database to a new log file, so that ", " // new encrytion will start on new log file. ", " switchLogFile();", " }", "", "" ], "header": "@@ -3959,6 +3954,27 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [] } ] } ]
derby-DERBY-1157-28162f65
DERBY-1157: Helper methods in BaseJDBCTestCase for creating data sources Adding getDataSource(), getConnectionPoolDataSource() and getXADataSource() to BaseJDBCTestCase. The new methods call various methods in TestUtil for creating the data sources. A minor adjustment had to be made to TestUtil.getDataSource() in order to make it return JDBC 4.0 DataSources. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@389780 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/functionTests/util/TestUtil.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.services.info.JVMInfo;" ], "header": "@@ -37,6 +37,7 @@ import java.security.PrivilegedActionException;", "removed": [] }, { "added": [ "\t\t\t// The JDBC 4.0 implementation of the DataSource interface", "\t\t\t// is suffixed with \"40\". Use it if it is available and", "\t\t\t// the JVM version is at least 1.6.", "\t\t\tif (JVMInfo.JDK_ID >= JVMInfo.J2SE_16) {", "\t\t\t\tString classname40 = classname + \"40\";", "\t\t\t\ttry {", "\t\t\t\t\tClass.forName(classname40);", "\t\t\t\t\tclassname = classname40;", "\t\t\t\t} catch (ClassNotFoundException e) {}", "\t\t\t}" ], "header": "@@ -306,6 +307,16 @@ public class TestUtil {", "removed": [] } ] } ]
derby-DERBY-1158-1128268f
DERBY-1158 (partial) Implement downgrading a request to create any holdable statement in a global transaction to return a non-holdable statement. Such a downgrade adds a SQLWarning to the Connection object. This follows section 16.1.3.1 of the JDBC 4.0 proposed final draft. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@391383 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredConnection.java", "hunks": [ { "added": [ "import org.apache.derby.impl.jdbc.EmbedSQLWarning;" ], "header": "@@ -28,6 +28,7 @@ import java.sql.DatabaseMetaData;", "removed": [] }, { "added": [ " ", " /**", " * Add a SQLWarning to this Connection object.", " * @throws SQLException ", " */", " public final void addWarning(SQLWarning w) throws SQLException", " {", " getRealConnection().addWarning(w);", " }" ], "header": "@@ -496,6 +497,15 @@ public class BrokeredConnection implements EngineConnection", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/jdbc/EngineConnection.java", "hunks": [ { "added": [ "import java.sql.SQLWarning;" ], "header": "@@ -22,6 +22,7 @@ package org.apache.derby.iapi.jdbc;", "removed": [] } ] } ]
derby-DERBY-1158-175a7402
DERBY-1158 (partial) Embedded Statements that are holdable automatically downgrade to close cursors on commit during a global transaction. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@392073 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredPreparedStatement.java", "hunks": [ { "added": [], "header": "@@ -55,7 +55,6 @@ public class BrokeredPreparedStatement extends BrokeredStatement", "removed": [ " checkHoldability();" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredStatement.java", "hunks": [ { "added": [ " \t\treturn wrapResultSet(getStatement().executeQuery(sql));" ], "header": "@@ -101,14 +101,12 @@ public class BrokeredStatement implements EngineStatement", "removed": [ " checkHoldability();", " checkHoldability();", "\t\treturn wrapResultSet(getStatement().executeQuery(sql));" ] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedStatement.java", "hunks": [ { "added": [ " ", " /**", " * Statement reference the application is using to execute", " * this Statement. Normally set to this, but if this was", " * created by a Connection from an XAConnection then this", " * will be a reference to the BrokeredStatement.", " */", " private EngineStatement applicationStatement;" ], "header": "@@ -62,6 +62,14 @@ public class EmbedStatement extends ConnectionChild", "removed": [] }, { "added": [ " applicationStatement = this;" ], "header": "@@ -111,6 +119,7 @@ public class EmbedStatement extends ConnectionChild", "removed": [] }, { "added": [ " ", " /**", " * Set the application statement for this Statement.", " */", " public final void setApplicationStatement(EngineStatement s) {", " this.applicationStatement = s;", " }" ], "header": "@@ -1419,7 +1428,13 @@ public class EmbedStatement extends ConnectionChild", "removed": [ "" ] } ] }, { "file": "java/engine/org/apache/derby/jdbc/XAStatementControl.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.jdbc.EngineStatement;" ], "header": "@@ -25,6 +25,7 @@ import org.apache.derby.iapi.jdbc.BrokeredStatementControl;", "removed": [] }, { "added": [ " ", " ((EmbedStatement) realStatement).setApplicationStatement(", " applicationStatement);" ], "header": "@@ -60,6 +61,9 @@ final class XAStatementControl implements BrokeredStatementControl {", "removed": [] }, { "added": [ " ((EmbedStatement) realPreparedStatement).setApplicationStatement(", " applicationStatement);" ], "header": "@@ -67,6 +71,8 @@ final class XAStatementControl implements BrokeredStatementControl {", "removed": [] }, { "added": [ " ((EmbedStatement) realCallableStatement).setApplicationStatement(", " applicationStatement);" ], "header": "@@ -74,6 +80,8 @@ final class XAStatementControl implements BrokeredStatementControl {", "removed": [] }, { "added": [ " " ], "header": "@@ -96,7 +104,7 @@ final class XAStatementControl implements BrokeredStatementControl {", "removed": [ "" ] }, { "added": [ " ((EmbedStatement) realStatement).setApplicationStatement(", " applicationStatement);" ], "header": "@@ -104,6 +112,8 @@ final class XAStatementControl implements BrokeredStatementControl {", "removed": [] }, { "added": [ " ((EmbedStatement) realPreparedStatement).setApplicationStatement(", " applicationStatement);" ], "header": "@@ -143,6 +153,8 @@ final class XAStatementControl implements BrokeredStatementControl {", "removed": [] } ] } ]
derby-DERBY-1158-23b4f8b6
DERBY-1158 (partial) Some method protection cleanup on EmbedStatement and EmbedPreparedStatement git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@389822 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedPreparedStatement.java", "hunks": [ { "added": [ "\tboolean executeBatchElement(Object batchElement) throws SQLException, StandardException {" ], "header": "@@ -815,7 +815,7 @@ public abstract class EmbedPreparedStatement", "removed": [ "\tprotected boolean executeBatchElement(Object batchElement) throws SQLException, StandardException {" ] }, { "added": [ "\tboolean executeStatement(Activation a," ], "header": "@@ -1415,7 +1415,7 @@ public abstract class EmbedPreparedStatement", "removed": [ "\tprotected boolean executeStatement(Activation a," ] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedStatement.java", "hunks": [ { "added": [ "\tint updateCount = -1;", "\tjava.sql.ResultSet results;", "\tprivate String cursorName;", "\tprivate final boolean forMetaData;", "\tfinal LanguageConnectionContext lcc;", "\tString SQLText;" ], "header": "@@ -62,21 +62,21 @@ public class EmbedStatement extends ConnectionChild", "removed": [ "\tprotected int updateCount = -1;", "\tprotected java.sql.ResultSet results;", "\tprotected String cursorName;", "\tprotected final boolean forMetaData;", "\tprotected final LanguageConnectionContext lcc;", "\tprotected String SQLText;" ] }, { "added": [ "\tprivate boolean active = true;", " \tVector batchStatements;" ], "header": "@@ -84,11 +84,11 @@ public class EmbedStatement extends ConnectionChild", "removed": [ "\tprotected boolean active = true;", " \tprotected Vector batchStatements;" ] }, { "added": [ "\tfinal void checkIfInMiddleOfBatch() throws SQLException {" ], "header": "@@ -223,7 +223,7 @@ public class EmbedStatement extends ConnectionChild", "removed": [ "\tprotected final void checkIfInMiddleOfBatch() throws SQLException {" ] }, { "added": [ "\tprivate boolean execute(String sql, boolean executeQuery, boolean executeUpdate," ], "header": "@@ -524,7 +524,7 @@ public class EmbedStatement extends ConnectionChild", "removed": [ "\tprotected boolean execute(String sql, boolean executeQuery, boolean executeUpdate," ] }, { "added": [ "\tboolean executeBatchElement(Object batchElement) throws SQLException, StandardException {" ], "header": "@@ -940,7 +940,7 @@ public class EmbedStatement extends ConnectionChild", "removed": [ "\tprotected boolean executeBatchElement(Object batchElement) throws SQLException, StandardException {" ] }, { "added": [ "\tboolean executeStatement(Activation a," ], "header": "@@ -1082,7 +1082,7 @@ public class EmbedStatement extends ConnectionChild", "removed": [ "\tprotected boolean executeStatement(Activation a," ] }, { "added": [ "\tvoid clearResultSets() throws SQLException {" ], "header": "@@ -1328,7 +1328,7 @@ public class EmbedStatement extends ConnectionChild", "removed": [ "\tprotected void clearResultSets() throws SQLException {" ] }, { "added": [ "\tvoid checkRequiresCallableStatement(Activation activation) throws SQLException {" ], "header": "@@ -1390,7 +1390,7 @@ public class EmbedStatement extends ConnectionChild", "removed": [ "\tprotected void checkRequiresCallableStatement(Activation activation) throws SQLException {" ] } ] } ]
derby-DERBY-1158-4e3b1b6b
DERBY-1015 (partial) Add an interface for Statement objects from the engine, EngineStatement. Removes some use of reflection in brokered objects and the network server. It is also a step towards solving DERBY-1158, downgrading holdability in global transactions. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@391691 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/drda/org/apache/derby/impl/drda/DRDAStatement.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.jdbc.EngineStatement;" ], "header": "@@ -38,6 +38,7 @@ import java.util.Vector;", "removed": [] }, { "added": [ "\t * get resultSetHoldability.", "\t\tStatement rsstmt;", " ", " int holdValue = ", " ((EngineStatement) rsstmt).getResultSetHoldability();", "", "\t * get resultSetHoldability.", "\tint getResultSetHoldability(ResultSet rs) throws SQLException", "\t\tStatement rsstmt;", " ", " int holdValue = ", " ((EngineStatement) rsstmt).getResultSetHoldability();", "" ], "header": "@@ -249,65 +250,47 @@ class DRDAStatement", "removed": [ "\t * get resultSetHoldability with reflection. ", "\t * We need to use reflection so we can use hold cursors with 1.3.1. ", "\t * And also since our statement might be a BrokeredStatement.", "\t\tStatement rsstmt = null;", "\t\tint holdValue = -1;", "\t\t\t\t", "\t\tClass[] getResultSetHoldabilityParam = {};", "\t\ttry {", "\t\t\tMethod sh =", "\t\t\t\trsstmt.getClass().getMethod(\"getResultSetHoldability\", getResultSetHoldabilityParam);", "\t\t\tholdValue = ((Integer) sh.invoke(rsstmt,null)).intValue();", "\t\t}", "\t\tcatch (Exception e) {", "\t\t\thandleReflectionException(e);", "\t\t}", "\t * get resultSetHoldability with reflection. ", "\t * We need to use reflection so we can use hold cursors with 1.3.1. ", "\t * And also since our statement might be a BrokeredStatement.", "\tprotected int getResultSetHoldability(ResultSet rs) throws SQLException", "\t\tStatement rsstmt = null;", "\t\tint holdValue = -1;", "\t\t\t\t", "\t\tClass[] getResultSetHoldabilityParam = {};", "\t\ttry {", "\t\t\tMethod sh =", "\t\t\t\trsstmt.getClass().getMethod(\"getResultSetHoldability\", getResultSetHoldabilityParam);", "\t\t\tholdValue = ((Integer) sh.invoke(rsstmt,null)).intValue();", "\t\t}", "\t\tcatch (Exception e) {", "\t\t\thandleReflectionException(e);", "\t\t}" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredStatement.java", "hunks": [ { "added": [ "public class BrokeredStatement implements EngineStatement" ], "header": "@@ -36,7 +36,7 @@ import java.lang.reflect.*;", "removed": [ "public class BrokeredStatement implements Statement" ] }, { "added": [ " return ((EngineStatement) getStatement()).getMoreResults( current);" ], "header": "@@ -441,7 +441,7 @@ public class BrokeredStatement implements Statement", "removed": [ " return getStatement().getMoreResults( current);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedStatement.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.jdbc.EngineStatement;" ], "header": "@@ -32,6 +32,7 @@ import org.apache.derby.iapi.sql.ResultSet;", "removed": [] }, { "added": [ " implements EngineStatement {" ], "header": "@@ -58,7 +59,7 @@ import java.sql.ResultSet;", "removed": [ " implements java.sql.Statement {" ] } ] } ]
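The DERBY-1015 entry above removes reflective lookups of getResultSetHoldability by introducing an internal EngineStatement interface that both the embedded statement and its brokered wrapper implement, so callers can simply cast instead of going through Method.invoke. The sketch below illustrates that pattern only; the class names EngineStmt, EmbeddedStmt, WrapperStmt and HoldabilityLookup are illustrative stand-ins, not Derby's real classes.

    import java.sql.ResultSet;
    import java.sql.SQLException;

    // Shared internal interface: the analogue of EngineStatement.
    interface EngineStmt {
        int getResultSetHoldability() throws SQLException;
    }

    // Concrete engine statement: the analogue of EmbedStatement.
    class EmbeddedStmt implements EngineStmt {
        public int getResultSetHoldability() {
            return ResultSet.HOLD_CURSORS_OVER_COMMIT;
        }
    }

    // Wrapper used for pooled/XA connections: the analogue of BrokeredStatement.
    class WrapperStmt implements EngineStmt {
        private final EngineStmt real;
        WrapperStmt(EngineStmt real) { this.real = real; }
        public int getResultSetHoldability() throws SQLException {
            return real.getResultSetHoldability();   // plain delegation, no reflection
        }
    }

    public class HoldabilityLookup {
        // Before the shared interface existed, callers had to do roughly
        // stmt.getClass().getMethod("getResultSetHoldability").invoke(stmt);
        // a cast to the interface replaces all of that.
        static int holdabilityOf(Object statement) throws SQLException {
            return ((EngineStmt) statement).getResultSetHoldability();
        }

        public static void main(String[] args) throws SQLException {
            System.out.println(holdabilityOf(new WrapperStmt(new EmbeddedStmt())));
        }
    }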
derby-DERBY-1158-4f2355d7
DERBY-1158 DERBY-1159 Remove special casing of network client for holdability tests using executeUpdate methods. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@392084 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1158-562671ec
DERBY-1158 (partial) Re-factor some code to enable BrokeredStatement.getResultSetHoldability() to return CLOSE_CURSORS_ON_COMMIT when in a global transaction. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@391842 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredPreparedStatement.java", "hunks": [ { "added": [ " checkHoldability();" ], "header": "@@ -55,7 +55,7 @@ public class BrokeredPreparedStatement extends BrokeredStatement", "removed": [ "\t\tcontrolCheck().checkHoldCursors(resultSetHoldability);" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredStatement.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.reference.SQLState;", "import org.apache.derby.impl.jdbc.Util;", "" ], "header": "@@ -21,10 +21,13 @@", "removed": [] }, { "added": [ " checkHoldability();", " checkHoldability();" ], "header": "@@ -98,13 +101,13 @@ public class BrokeredStatement implements EngineStatement", "removed": [ "\t\tcontrolCheck().checkHoldCursors(resultSetHoldability);", "\t\tcontrolCheck().checkHoldCursors(resultSetHoldability);" ] }, { "added": [ " /**", " * Return the holdability of ResultSets created by this Statement.", " * If this Statement is active in a global transaction the", " * CLOSE_CURSORS_ON_COMMIT will be returned regardless of", " * the holdability it was created with. In a local transaction", " * the original create holdabilty will be returned.", " */", " int holdability =", " ((EngineStatement) getStatement()).getResultSetHoldability();", " ", " // Holdability might be downgraded.", " return controlCheck().checkHoldCursors(holdability);" ], "header": "@@ -461,10 +464,21 @@ public class BrokeredStatement implements EngineStatement", "removed": [ "\t\treturn ((EngineStatement) getStatement()).getResultSetHoldability();" ] }, { "added": [ "\t\t\tnewStatement = conn.createStatement(resultSetType, resultSetConcurrency,", " resultSetHoldability);" ], "header": "@@ -478,7 +492,8 @@ public class BrokeredStatement implements EngineStatement", "removed": [ "\t\t\tnewStatement = conn.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedStatement.java", "hunks": [ { "added": [ " ", " boolean executeHoldable = getExecuteHoldable();", " ", "\t\t\t\ta.setResultSetHoldability(executeHoldable);" ], "header": "@@ -1151,8 +1151,10 @@ public class EmbedStatement extends ConnectionChild", "removed": [ "", "\t\t\t\ta.setResultSetHoldability(resultSetHoldability != JDBC30Translation.CLOSE_CURSORS_AT_COMMIT);" ] } ] } ]
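The entry above makes BrokeredStatement.getResultSetHoldability() ask its control object whether the holdability must be downgraded while a global (XA) transaction is active; the JDBC constant involved is ResultSet.CLOSE_CURSORS_AT_COMMIT. Below is a minimal sketch of that decision only, where the inGlobalTransaction flag is an assumed stand-in for the state the real BrokeredStatementControl consults.

    import java.sql.ResultSet;

    public class HoldabilityDowngrade {

        // Return the holdability a statement should report: what it was created
        // with in a local transaction, but CLOSE_CURSORS_AT_COMMIT while a
        // global (XA) transaction is active.
        static int effectiveHoldability(int createdWith, boolean inGlobalTransaction) {
            if (inGlobalTransaction) {
                return ResultSet.CLOSE_CURSORS_AT_COMMIT;   // forced downgrade
            }
            return createdWith;                             // keep the created holdability
        }

        public static void main(String[] args) {
            int created = ResultSet.HOLD_CURSORS_OVER_COMMIT;
            // Prints 1 (HOLD_CURSORS_OVER_COMMIT) then 2 (CLOSE_CURSORS_AT_COMMIT).
            System.out.println(effectiveHoldability(created, false));
            System.out.println(effectiveHoldability(created, true));
        }
    }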
derby-DERBY-1158-72cc5532
DERBY-1158 (partial) Some cleanup of field & method permissions in the embedded JDBC driver and rename getWarnings to addWarning to correctly reflect its use. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@389630 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedResultSet.java", "hunks": [ { "added": [ "\tprivate ResultSet theResults;" ], "header": "@@ -116,7 +116,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\tprotected ResultSet theResults;" ] }, { "added": [ "\tprivate final boolean isAtomic;", "\tprivate final int concurrencyOfThisResultSet;" ], "header": "@@ -173,9 +173,9 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\tprotected final boolean isAtomic;", "\tprotected final int concurrencyOfThisResultSet;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedStatement.java", "hunks": [ { "added": [ "\tfinal int resultSetType;", "\tprivate final int resultSetConcurrency;", "\tprivate final int resultSetHoldability;" ], "header": "@@ -70,9 +70,9 @@ public class EmbedStatement extends ConnectionChild", "removed": [ "\tpublic final int resultSetType;", "\tprotected final int resultSetConcurrency;", "\tprotected final int resultSetHoldability;" ] }, { "added": [ " public final int getResultSetHoldability() throws SQLException {" ], "header": "@@ -817,7 +817,7 @@ public class EmbedStatement extends ConnectionChild", "removed": [ " public int getResultSetHoldability() throws SQLException {" ] }, { "added": [ "\t\t\t\taddWarning(ps.getCompileTimeWarnings());" ], "header": "@@ -1134,7 +1134,7 @@ public class EmbedStatement extends ConnectionChild", "removed": [ "\t\t\t\tgetWarnings(ps.getCompileTimeWarnings());" ] }, { "added": [ "" ], "header": "@@ -1150,6 +1150,7 @@ public class EmbedStatement extends ConnectionChild", "removed": [] }, { "added": [ "\t\t\t\taddWarning(a.getWarnings());" ], "header": "@@ -1162,7 +1163,7 @@ public class EmbedStatement extends ConnectionChild", "removed": [ "\t\t\t\tgetWarnings(a.getWarnings());" ] }, { "added": [ " /**", " * Add a SQLWarning to this Statement object.", " * If the Statement already has a SQLWarning then it", " * is added to the end of the chain.", " * ", " * @see #getWarnings()", " */", "\tfinal void addWarning(SQLWarning sw)" ], "header": "@@ -1233,7 +1234,14 @@ public class EmbedStatement extends ConnectionChild", "removed": [ "\tprotected void getWarnings(SQLWarning sw)" ] }, { "added": [ "\t\t\t\taddWarning(StandardException.newWarning(SQLState.LANG_TOO_MANY_DYNAMIC_RESULTS_RETURNED));" ], "header": "@@ -1454,7 +1462,7 @@ public class EmbedStatement extends ConnectionChild", "removed": [ "\t\t\t\tgetWarnings(StandardException.newWarning(SQLState.LANG_TOO_MANY_DYNAMIC_RESULTS_RETURNED));" ] } ] } ]
derby-DERBY-1158-b01ccf59
DERBY-1196 Network server closes prepared statements prematurely if exception occurs during OPNQRY - Takes out close of prepared statement on exception in OPNQRY in DRDAConnThread processCommands - Updates the JCC master for derbynet/setTransactionIsolation. This test was getting Statement already closed errors for valid statements with JCC that are no longer in the master. - Enables test for DERBY-1047 in checkDataSource (Note this case actually passes without the fix because of the fix for DERBY-1158) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@392781 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1158-ce9d2145
DERBY-1158 (partial) Re-factor check holdability methods in the brokered statement and connection control to support downgrading the holdability when needed. At the user level, this commit still throws an exception when holdable ResultSets are requested in a global transaction. A subsequent commit will change to creating a warning on the Connection when creating a Statement with holdable ResultSets. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@389825 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredConnection.java", "hunks": [ { "added": [ "import org.apache.derby.shared.common.reference.SQLState;" ], "header": "@@ -38,6 +38,7 @@ import java.lang.reflect.*;", "removed": [] }, { "added": [ " resultSetHoldability = statementHoldabilityCheck(resultSetHoldability);", " \t\t", " \t\t\tgetRealConnection().prepareStatement(sql, resultSetType,", " resultSetConcurrency, resultSetHoldability), sql, null);" ], "header": "@@ -536,9 +537,11 @@ public class BrokeredConnection implements EngineConnection", "removed": [ " \t\tcontrol.checkHoldCursors(resultSetHoldability);", " \t\t\tgetRealConnection().prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability), sql, null);" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredConnection30.java", "hunks": [ { "added": [ " resultSetHoldability = statementHoldabilityCheck(resultSetHoldability);", "\t\t\treturn control.wrapStatement(getRealConnection().createStatement(resultSetType,", " resultSetConcurrency, resultSetHoldability));" ], "header": "@@ -43,8 +43,9 @@ public class BrokeredConnection30 extends BrokeredConnection", "removed": [ "\t\t\tcontrol.checkHoldCursors(resultSetHoldability);", "\t\t\treturn control.wrapStatement(getRealConnection().createStatement(resultSetType, resultSetConcurrency, resultSetHoldability));" ] }, { "added": [ " resultSetHoldability = statementHoldabilityCheck(resultSetHoldability);", "\t\t\t\tgetRealConnection().prepareCall(sql, resultSetType,", " resultSetConcurrency, resultSetHoldability), sql);" ], "header": "@@ -58,9 +59,10 @@ public class BrokeredConnection30 extends BrokeredConnection", "removed": [ "\t\t\tcontrol.checkHoldCursors(resultSetHoldability);", "\t\t\t\tgetRealConnection().prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability), sql);" ] } ] }, { "file": "java/engine/org/apache/derby/jdbc/XAStatementControl.java", "hunks": [ { "added": [ "\tpublic int checkHoldCursors(int holdability) throws SQLException {", "\t\treturn xaConnection.checkHoldCursors(holdability, false);", " \t}" ], "header": "@@ -197,7 +197,7 @@ final class XAStatementControl implements BrokeredStatementControl {", "removed": [ "\tpublic void checkHoldCursors(int holdability) throws SQLException {", "\t\txaConnection.checkHoldCursors(holdability);", "\t}" ] } ] } ]
derby-DERBY-1159-0bf3bf88
DERBY-1159 Add some test cases that show the bug. Execution of a statement that does not return a ResultSet with various JDBC Statement objects using executeUpdate in a global transaction. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@389648 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1159-4f2355d7
DERBY-1158 DERBY-1159 Remove special casing of network client for holdability tests using executeUpdate methods. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@392084 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-117-4551a6e8
DERBY-117 Improvements to the Servlet - improved use of tracingDirectory: * init-parameter is now used (it was documented but not used before) * current value is shown when the user try to change it - improved function that handles the request locale (for instance, mozilla uses a - instead of _ to separate the country from language) - shows on the screen which port the server is running on * added a new SRV_POrtNumber message key for such info Note: I did not commit the translated messages as I could not get them to patch. Contributed by Filipe Leme git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@332444 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/drda/org/apache/derby/drda/NetServlet.java", "hunks": [ { "added": [], "header": "@@ -23,12 +23,10 @@ package org.apache.derby.drda;", "removed": [ "import java.sql.*;", "import java.security.PrivilegedActionException;" ] }, { "added": [ "\t\t\t\"ko_KR\", \"pt_BR\", \"zh_CN\", \"zh_TW\"};" ], "header": "@@ -60,7 +58,7 @@ public class NetServlet extends HttpServlet {", "removed": [ "\t\t\t\"ko_KR\", \"zh_CN\", \"zh_TW\"};" ] }, { "added": [ "\t" ], "header": "@@ -71,7 +69,7 @@ public class NetServlet extends HttpServlet {", "removed": [ "" ] }, { "added": [ "\t\t" ], "header": "@@ -84,7 +82,7 @@ public class NetServlet extends HttpServlet {", "removed": [ "" ] }, { "added": [ "\t\tthis.tracingDirectory = config.getInitParameter(\"tracingDirectory\");", "\t\t", "\t\tif ( this.tracingDirectory == null ) {", "\t\t\tthis.tracingDirectory = \"\";", "\t\t}", "\t\t\tif (server == null) {", "\t\t\t\t// assert this.tracingDirectory != null", "\t\t\t\tif ( ! this.tracingDirectory.trim().equals(\"\")) {", "\t\t\t\t\tserver.setTraceDirectory(this.tracingDirectory);", "\t\t\t\t}", "\t\t\t}", "\t\t\t" ], "header": "@@ -94,15 +92,25 @@ public class NetServlet extends HttpServlet {", "removed": [ "\t\tString tracingDirectory = config.getInitParameter(\"tracingDirectory\");", "\t\t\tif (server == null)" ] }, { "added": [ "\t\t\t\t\t\"id='tracedir' value='\"+tracingDirectory+\"'>\");" ], "header": "@@ -401,7 +409,7 @@ public class NetServlet extends HttpServlet {", "removed": [ "\t\t\t\t\t\"id='tracedir' value=''>\");" ] }, { "added": [ "\t\t\tval = p.getProperty(Property.DRDA_PROP_PORTNUMBER);", "\t\t\tout.println( \"<h4>\"+localUtil.getTextMessage(\"SRV_PortNumber\", val)+\"</h4>\");", "\t\t\t" ], "header": "@@ -658,6 +666,9 @@ public class NetServlet extends HttpServlet {", "removed": [] }, { "added": [ "\t\t\tthis.tracingDirectory = traceDirectory;" ], "header": "@@ -789,6 +800,7 @@ public class NetServlet extends HttpServlet {", "removed": [] }, { "added": [ "\t\t// trim any whitespace and fix the code, as some browsers might send a bad format", "\t\tlang = fixLanguageCode(lang.trim());" ], "header": "@@ -938,9 +950,8 @@ public class NetServlet extends HttpServlet {", "removed": [ "\t\t// trim any whitespace", "\t\tlang = lang.trim();", "\t\t" ] } ] } ]
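The DERBY-117 entry above makes the servlet honour its documented tracingDirectory init-parameter: read it in init(), treat a missing value as empty, and only pass it on when it is non-blank. The sketch below shows that init-parameter handling against the javax.servlet API; the TraceTarget interface is a hypothetical stand-in for the network server control object the real servlet configures.

    import javax.servlet.ServletConfig;

    public class TracingDirectoryConfig {
        private String tracingDirectory = "";

        // Read the init-parameter once, at servlet initialisation time.
        public void init(ServletConfig config) {
            String dir = config.getInitParameter("tracingDirectory");
            tracingDirectory = (dir == null) ? "" : dir;   // missing == empty
        }

        // Apply the directory only when the administrator actually set one.
        public void applyTo(TraceTarget server) {
            if (!tracingDirectory.trim().equals("")) {
                server.setTraceDirectory(tracingDirectory);
            }
        }

        // Stand-in for the object that really receives the setting.
        public interface TraceTarget {
            void setTraceDirectory(String dir);
        }
    }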
derby-DERBY-1172-f342d5bc
DERBY-1172 incorrect error message in updateRow() after a commit on a held scroll insensitive resultset. Submitted by Andreas Korneliussen git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@391153 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1174-cbd7b157
DERBY-1174 NullPointerException in network server with LDAP authentication Submitted by Anders Morken git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@395525 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/jdbc/authentication/JNDIAuthenticationSchemeBase.java", "hunks": [ { "added": [ "\t\t// if any. If dbProps == null, there are obviously no database", "\t\t// properties to retrieve.", " ", "\t\tif(dbProps != null) {", "\t\t\tfor (Enumeration keys = dbProps.propertyNames(); keys.hasMoreElements(); ) {", "\t\t\t\tString key = (String) keys.nextElement();", "\t\t\t\tif (key.startsWith(\"java.naming.\")) {", "\t\t\t\t\tinitDirContextEnv.put(key, dbProps.getProperty(key));", "\t\t\t\t}" ], "header": "@@ -97,16 +97,19 @@ public abstract class JNDIAuthenticationSchemeBase implements UserAuthenticator", "removed": [ "\t\t// if any.", "\t\tfor (Enumeration keys = dbProps.propertyNames(); keys.hasMoreElements(); ) {", "\t\t\tString key = (String) keys.nextElement();", "", "\t\t\tif (key.startsWith(\"java.naming.\")) {", "\t\t\t\tinitDirContextEnv.put(key, dbProps.getProperty(key));" ] } ] } ]
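The DERBY-1174 fix above is a plain null guard: when a database defines no database-level properties, dbProps is null and the loop that copies java.naming.* keys into the JNDI environment must simply be skipped. A self-contained sketch of that guarded copy, using only java.util types rather than Derby's classes:

    import java.util.Enumeration;
    import java.util.Hashtable;
    import java.util.Properties;

    public class NamingPropsCopy {

        static Hashtable<String, String> buildDirContextEnv(Properties dbProps) {
            Hashtable<String, String> env = new Hashtable<String, String>();
            if (dbProps != null) {   // the missing check behind DERBY-1174
                for (Enumeration<?> keys = dbProps.propertyNames(); keys.hasMoreElements();) {
                    String key = (String) keys.nextElement();
                    if (key.startsWith("java.naming.")) {
                        env.put(key, dbProps.getProperty(key));
                    }
                }
            }
            return env;
        }

        public static void main(String[] args) {
            // Prints {} instead of throwing a NullPointerException.
            System.out.println(buildDirContextEnv(null));
        }
    }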
derby-DERBY-1176-ec2bfafe
DERBY-1176: Stored prepared statements in the SYSIBM schema are not updated on upgrade 1. DD_Version.doFullUpgrade() now drops and recreates SPSs in all system schemas, not SYSIBM only. 2. EmbedDatabaseMetaData already has the necessary machinery to load queries from system tables or metadata.properties depending on whether the engine is in soft upgrade mode or not. I extended it so that it could read queries from metadata_net.properties as well. 3. New method EmbedDatabaseMetaData.getClientCachedMetaData() which executes SYSIBM.METADATA (either from system table or metadata_net.properties) to fetch the metadata that will be cached on the client. 4. SystemProcedures.METADATA() now invokes EmbedDatabaseMetaData.getClientCachedMetaData() instead of executing the METADATA SPS directly. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@395799 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedDatabaseMetaData.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.services.sanity.SanityManager;", "" ], "header": "@@ -24,6 +24,8 @@ import org.apache.derby.iapi.services.info.ProductVersionHolder;", "removed": [] }, { "added": [ " /** Cached query descriptions from metadata.properties. */", " private static Properties queryDescriptions;", " /** Cached query descriptions from metadata_net.properties. */", " private static Properties queryDescriptions_net;", " /**", " * Return all queries found in either metadata.properties or", " * metadata_net.properties.", " *", " * @param net if <code>true</code>, read metadata_net.properties;", " * otherwise, read metadata.properties.", " * @return a <code>Properties</code> value with the queries", " */", " private Properties getQueryDescriptions(boolean net) {", " Properties p = net ? queryDescriptions_net : queryDescriptions;", " if (p != null) {", " return p;", " }", " loadQueryDescriptions();", " return net ? queryDescriptions_net : queryDescriptions;", " }", " /**", " * Read the query descriptions from metadata.properties and", " * metadata_net.properties. This method must be invoked from", " * within a privileged block.", " */", " private void PBloadQueryDescriptions() {", " String[] files = {", " \"metadata.properties\",", " \"/org/apache/derby/impl/sql/catalog/metadata_net.properties\"", " };", " Properties[] props = new Properties[files.length];", " for (int i = 0; i < files.length; ++i) {", " try {", " props[i] = new Properties();", " // SECURITY PERMISSION - IP3", " InputStream is = getClass().getResourceAsStream(files[i]);", " props[i].load(is);", " is.close();", " } catch (IOException ioe) {", " if (SanityManager.DEBUG) {", " SanityManager.THROWASSERT(\"Error reading \" + files[i], ioe);", " }", " }", " }", " queryDescriptions = props[0];", " queryDescriptions_net = props[1];", " }" ], "header": "@@ -115,28 +117,54 @@ public class EmbedDatabaseMetaData extends ConnectionChild", "removed": [ "\tprivate static Properties queryDescriptions;", "\tprotected final Properties getQueryDescriptions() {", "\t\tProperties p = EmbedDatabaseMetaData.queryDescriptions;", "\t\tif (p != null)", "\t\t\treturn p;", "", "\t\treturn (EmbedDatabaseMetaData.queryDescriptions = loadQueryDescriptions());", "\t}", "", "\tprivate Properties PBloadQueryDescriptions() {", "\t\tProperties p = new Properties();", "\t\ttry {", "\t\t\t// SECURITY PERMISSION - IP3", "\t\t\tInputStream is = getClass().getResourceAsStream(\"metadata.properties\");", "\t\t\t", "\t\t\tp.load(is);", "\t\t\tis.close();", "\t\t} catch (IOException ioe) {", "\t\t}", "\t\treturn p;", "\t}" ] }, { "added": [ "\t\t\tString queryText =", "\t\t\t\tgetQueryDescriptions(false).getProperty(\"getTables\");" ], "header": "@@ -1648,7 +1676,8 @@ public class EmbedDatabaseMetaData extends ConnectionChild", "removed": [ "\t\t\tString queryText = getQueryDescriptions().getProperty(\"getTables\");" ] }, { "added": [ " * metadata.properties or metadata_net.properties file rather than", " * rely on system tables." ], "header": "@@ -2187,7 +2216,8 @@ public class EmbedDatabaseMetaData extends ConnectionChild", "removed": [ " * metadata.properties file rather than rely on system tables." 
] }, { "added": [ " String queryText = getQueryDescriptions(false).getProperty(\"getUDTs\");" ], "header": "@@ -2918,7 +2948,7 @@ public class EmbedDatabaseMetaData extends ConnectionChild", "removed": [ " String queryText = getQueryDescriptions().getProperty(\"getUDTs\");" ] }, { "added": [ " /**", " * Get metadata that the client driver will cache. The metadata is", " * fetched using SYSIBM.METADATA (found in metadata_net.properties).", " *", " * @return the result set returned by SYSIBM.METADATA", " * @exception SQLException if a database error occurs", " */", " public ResultSet getClientCachedMetaData() throws SQLException {", " return getSimpleQuery(\"METADATA\", true);", " }", "", " /**", " * Execute a query in metadata.properties (or SPS in the SYS", " * schema) or metadata_net.properties (or SPS in the SYSIBM", " * schema).", " *", " * @param nameKey the name of the query", " * @param net if <code>true</code>, execute a query in", " * metadata_net.properties; otherwise, execute a query in", " * metadata.properties", " * @return a <code>ResultSet</code> value", " * @exception SQLException if a database error occurs", " */", " private ResultSet getSimpleQuery(String nameKey, boolean net)", " throws SQLException", "\t\tPreparedStatement ps = getPreparedQuery(nameKey, net);", " /**", " * Execute a query in metadata.properties, or an SPS in the SYS", " * schema.", " *", " * @param nameKey the name of the query", " * @return a <code>ResultSet</code> value", " * @exception SQLException if a database error occurs", " */", " protected ResultSet getSimpleQuery(String nameKey) throws SQLException {", " return getSimpleQuery(nameKey, false);", " }", "", " /**", " * Get a stored prepared statement from the system tables.", " *", " * @param nameKey the name of the query", " * @param net if <code>true</code>, find query in SYSIBM schema;", " * otherwise, find query in SYS schema", " * @return a <code>PreparedStatement</code> value", " * @exception SQLException if a database error occurs", " */", " private PreparedStatement getPreparedQueryUsingSystemTables(String nameKey,", " boolean net)", " throws SQLException " ], "header": "@@ -3285,20 +3315,67 @@ public class EmbedDatabaseMetaData extends ConnectionChild", "removed": [ "\tprotected ResultSet getSimpleQuery(String nameKey) throws SQLException", "\t\tPreparedStatement ps = getPreparedQuery(nameKey);", "\tprivate PreparedStatement getPreparedQueryUsingSystemTables(String nameKey) throws SQLException " ] }, { "added": [ "\t\t\t\tString queryText =", "\t\t\t\t\tgetQueryDescriptions(net).getProperty(nameKey);", " ps = prepareSPS(nameKey, queryText, net);" ], "header": "@@ -3307,13 +3384,14 @@ public class EmbedDatabaseMetaData extends ConnectionChild", "removed": [ "\t\t\t\tString queryText = getQueryDescriptions().getProperty(nameKey);", " ps = prepareSPS(nameKey, queryText);" ] }, { "added": [ "\t * system tables, or from the metadata.properties or", "\t * metadata_net.properties file." ], "header": "@@ -3331,7 +3409,8 @@ public class EmbedDatabaseMetaData extends ConnectionChild", "removed": [ "\t * system tables or from the metadata.properties file." 
] }, { "added": [ "\t * Getting queries from metadata(_net).properties might cause problems" ], "header": "@@ -3339,7 +3418,7 @@ public class EmbedDatabaseMetaData extends ConnectionChild", "removed": [ "\t * Getting queries from metadata.properties might cause problems" ] }, { "added": [ "\t * @param net if <code>true</code>, use metadata_net.properties", "\t * instead of metadata.properties", "\t * @exception SQLException if a database error occurs", "\tprivate PreparedStatement getPreparedQuery(String queryName,", "\t\t\t\t\t\t\t\t\t\t\t boolean net)", "\t\t\ts = getPreparedQueryUsingSystemTables(queryName, net);", "\t\t\t\t//or metadata_net.properties", "\t\t\t\tString queryText = getQueryFromDescription(queryName, net);" ], "header": "@@ -3347,21 +3426,26 @@ public class EmbedDatabaseMetaData extends ConnectionChild", "removed": [ "\tprotected PreparedStatement getPreparedQuery(String queryName)", "\t\t\ts = getPreparedQueryUsingSystemTables(queryName);", "\t\t\t\tString queryText = getQueryFromDescription(queryName);" ] }, { "added": [ " /**", " * Get a prepared query from system tables or metadata.properties.", " *", " * @param queryName name of the query", " * @return a <code>PreparedStatement</code> value", " * @exception SQLException if a database error occurs", " */", " protected PreparedStatement getPreparedQuery(String queryName)", " throws SQLException", " {", " return getPreparedQuery(queryName, false);", " }", "", "\t/**" ], "header": "@@ -3370,7 +3454,20 @@ public class EmbedDatabaseMetaData extends ConnectionChild", "removed": [ "\t/*" ] }, { "added": [ "\t *", "\t * @param queryName name of the query", "\t * @param net if <code>true</code>, get the query from", "\t * metadata_net.properties instead of metadata.properties", "\t * @return the query text", "\t * @exception StandardException if an error occurs", "\tprivate String getQueryFromDescription(String queryName, boolean net)" ], "header": "@@ -3385,8 +3482,14 @@ public class EmbedDatabaseMetaData extends ConnectionChild", "removed": [ "\tprivate String getQueryFromDescription(String queryName)" ] }, { "added": [ "\t\treturn getQueryDescriptions(net).getProperty(queryName);" ], "header": "@@ -3403,7 +3506,7 @@ public class EmbedDatabaseMetaData extends ConnectionChild", "removed": [ "\t\treturn getQueryDescriptions().getProperty(queryName);" ] }, { "added": [ "\t\t\t\t\t\t\t\t\t\t String\tspsText,", "\t\t\t\t\t\t\t\t\t\t boolean net)" ], "header": "@@ -3413,7 +3516,8 @@ public class EmbedDatabaseMetaData extends ConnectionChild", "removed": [ "\t\t\t\t\t\t\t\t\t\t String\tspsText)" ] }, { "added": [ "\t\t\t\t\t\t\t\t\t\tnet ? dd.getSysIBMSchemaDescriptor() :" ], "header": "@@ -3428,6 +3532,7 @@ public class EmbedDatabaseMetaData extends ConnectionChild", "removed": [] }, { "added": [ "\t\tString queryText =", "\t\t\t\"EXECUTE STATEMENT \" + (net ? 
\"SYSIBM\" : \"SYS\") +", "\t\t\t\".\\\"\" + spsName + \"\\\"\";", "\t\treturn getEmbedConnection().prepareMetaDataStatement(queryText);" ], "header": "@@ -3446,8 +3551,10 @@ public class EmbedDatabaseMetaData extends ConnectionChild", "removed": [ "\t\treturn getEmbedConnection().prepareMetaDataStatement(", "\t\t\t\t\t\t\t\t\t\"EXECUTE STATEMENT SYS.\\\"\"+spsName+\"\\\"\");" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/catalog/DD_Version.java", "hunks": [ { "added": [ "\t\tbootingDictionary.createSystemSps(tc);" ], "header": "@@ -321,7 +321,7 @@ public\tclass DD_Version implements\tFormatable", "removed": [ "\t\tbootingDictionary.createSPSSet(tc, false, bootingDictionary.getSystemSchemaDescriptor().getUUID());" ] }, { "added": [ "\t * @param removeSYSIBMonly if <code>true</code>, remove stored", "\t * prepared statements in the SYSIBM schema only; otherwise,", "\t * remove stored prepared statements in all system schemas", "\t * (including SYSIBM)" ], "header": "@@ -440,6 +440,10 @@ public\tclass DD_Version implements\tFormatable", "removed": [] }, { "added": [ "", "\t\t\t// don't drop statements in non-system schemas", "\t\t\tif (!sd.isSystemSchema() && !isSYSIBM) {", "\t\t\t\tcontinue;", "\t\t\t}", "", "\t\t\t// don't drop statements outside the SYSIBM schema if", "\t\t\t// we're told not to", "\t\t\tif (removeSYSIBMonly && !isSYSIBM) {", "", "\t\t\tbootingDictionary.dropSPSDescriptor(spsd, tc);", "\t\t\tbootingDictionary.dropDependentsStoredDependencies(spsd.getUUID(),", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t tc);" ], "header": "@@ -452,16 +456,21 @@ public\tclass DD_Version implements\tFormatable", "removed": [ "\t\t\tif (! sd.isSystemSchema() && ! isSYSIBM)", "\t\t\t/*", "\t\t\t** Is it in SYS? if so, zap it.", "\t\t\t*/", "\t\t\tif ((removeSYSIBMonly && isSYSIBM) || (! removeSYSIBMonly && ! isSYSIBM))", "\t\t\t{", "\t\t\t\tbootingDictionary.dropSPSDescriptor(spsd, tc);", "\t\t\t\tbootingDictionary.dropDependentsStoredDependencies(spsd.getUUID(), tc);" ] } ] } ]
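The DERBY-1176 entry above teaches EmbedDatabaseMetaData to serve queries from two sources, metadata.properties (SYS schema) and metadata_net.properties (SYSIBM schema), selected by a boolean net flag and cached after the first load. The sketch below reduces that to a plain two-file lookup; the resource names come from the diff, while the caching and error handling are simplified stand-ins for the real privileged-block loader.

    import java.io.IOException;
    import java.io.InputStream;
    import java.util.Properties;

    public class MetadataQueries {
        private static Properties queries;      // from metadata.properties
        private static Properties queriesNet;   // from metadata_net.properties

        // Return the query text for nameKey, from the net or the embedded set.
        static synchronized String getQuery(String nameKey, boolean net) {
            if (queries == null) {
                queries = load("metadata.properties");
                queriesNet = load("/org/apache/derby/impl/sql/catalog/metadata_net.properties");
            }
            return (net ? queriesNet : queries).getProperty(nameKey);
        }

        private static Properties load(String resource) {
            Properties p = new Properties();
            try (InputStream is = MetadataQueries.class.getResourceAsStream(resource)) {
                if (is != null) {
                    p.load(is);
                }
            } catch (IOException ioe) {
                throw new RuntimeException("Error reading " + resource, ioe);
            }
            return p;
        }
    }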
derby-DERBY-1180-a4846de8
DERBY-1180: Committed Kristian's patches providing tests and vacuous implementations for missing signatures: derby-1180-2a-client.diff and derby-1180-3a-testing.diff. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@396669 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/Blob.java", "hunks": [ { "added": [ "import java.io.InputStream;", "", "" ], "header": "@@ -20,8 +20,11 @@", "removed": [] } ] }, { "file": "java/client/org/apache/derby/client/am/Clob.java", "hunks": [ { "added": [ "import java.io.Reader;", "" ], "header": "@@ -20,7 +20,9 @@", "removed": [] } ] }, { "file": "java/client/org/apache/derby/client/net/NetResultSet40.java", "hunks": [ { "added": [ " public Reader getNCharacterStream(int columnIndex)", " throws SQLException {", " throw SQLExceptionFactory.notImplemented(\"getNCharacterStream(int)\");", " }", "", " public Reader getNCharacterStream(String columnName)", " throws SQLException {", " throw SQLExceptionFactory.notImplemented(\"getNCharacterStream(String)\");", " }", "", " public String getNString(int columnIndex)", " throws SQLException {", " throw SQLExceptionFactory.notImplemented(\"getNString(int)\");", " }", "", " public String getNString(String columnName)", " throws SQLException {", " throw SQLExceptionFactory.notImplemented(\"getNString(String)\");", " }", " " ], "header": "@@ -57,6 +57,26 @@ public class NetResultSet40 extends NetResultSet{", "removed": [] } ] } ]
derby-DERBY-1180-b64ccfc4
DERBY-1180: Commit Kristian's derby-1180-4c-logicalconnection_and_tests.diff patch, adding vacuous implementations for missing signatures. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@398594 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/LogicalConnection.java", "hunks": [ { "added": [ " protected Connection physicalConnection_ = null; // reset to null when the logical connection is closed." ], "header": "@@ -33,7 +33,7 @@ import java.sql.SQLException;", "removed": [ " private Connection physicalConnection_ = null; // reset to null when the logical connection is closed." ] } ] }, { "file": "java/client/org/apache/derby/client/net/ClientJDBCObjectFactoryImpl.java", "hunks": [ { "added": [ "import org.apache.derby.client.am.LogicalConnection;" ], "header": "@@ -26,6 +26,7 @@ import java.sql.SQLException;", "removed": [] } ] }, { "file": "java/client/org/apache/derby/client/net/ClientJDBCObjectFactoryImpl40.java", "hunks": [ { "added": [ "import org.apache.derby.client.am.LogicalConnection;", "import org.apache.derby.client.am.LogicalConnection40;" ], "header": "@@ -27,6 +27,8 @@ import org.apache.derby.client.am.CallableStatement40;", "removed": [] } ] } ]
derby-DERBY-1180-fcf7ef38
DERBY-1180 (partial): Add vacuous implementations of missing JDBC4 methods Stubs for missing JDBC 4.0 methods in the embedded driver. Patch contributed by Kristian Waagan <Kristian.Waagan@Sun.COM>. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@395872 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredConnection40.java", "hunks": [ { "added": [ "import java.sql.BaseQuery;", "import java.sql.Blob;" ], "header": "@@ -20,6 +20,8 @@", "removed": [] }, { "added": [ " public Blob createBlob() throws SQLException {", " throw Util.notImplemented();", " }", "" ], "header": "@@ -37,6 +39,10 @@ public class BrokeredConnection40 extends BrokeredConnection30 {", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedResultSet40.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.ResultSet;", "", "import java.io.Reader;" ], "header": "@@ -20,11 +20,13 @@", "removed": [ "import org.apache.derby.iapi.sql.ResultSet;" ] }, { "added": [ " public void updateNCharacterStream(int columnIndex, Reader x, int length) ", " throws SQLException {", " throw Util.notImplemented();", " }", " ", " public void updateNCharacterStream(String columnName, Reader x, int length)", " throws SQLException {", " throw Util.notImplemented();", " }", "" ], "header": "@@ -62,6 +64,16 @@ public class EmbedResultSet40 extends org.apache.derby.impl.jdbc.EmbedResultSet2", "removed": [] }, { "added": [ " public Reader getNCharacterStream(int columnIndex) throws SQLException {", " throw Util.notImplemented();", " }", " ", " public Reader getNCharacterStream(String columnName) throws SQLException {", " throw Util.notImplemented();", " }", "" ], "header": "@@ -78,6 +90,14 @@ public class EmbedResultSet40 extends org.apache.derby.impl.jdbc.EmbedResultSet2", "removed": [] }, { "added": [ " public String getNString(int columnIndex) throws SQLException {", " throw Util.notImplemented();", " }", " ", " public String getNString(String columnName) throws SQLException {", " throw Util.notImplemented();", " }", " " ], "header": "@@ -86,6 +106,14 @@ public class EmbedResultSet40 extends org.apache.derby.impl.jdbc.EmbedResultSet2", "removed": [] } ] } ]
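The DERBY-1180 commits above add "vacuous" implementations: every missing JDBC 4.0 signature is declared so the drivers compile against the new interfaces, but each body immediately raises a not-implemented error (Util.notImplemented() in the embedded driver, SQLExceptionFactory.notImplemented(...) in the client). Below is a sketch of that stub pattern with a local helper standing in for those factories; the class itself is illustrative and does not implement the full java.sql.ResultSet interface.

    import java.io.Reader;
    import java.sql.SQLException;
    import java.sql.SQLFeatureNotSupportedException;

    public abstract class StubResultSet {

        // Stand-in for Derby's notImplemented() factories.
        private static SQLException notImplemented(String method) {
            return new SQLFeatureNotSupportedException(method + " is not implemented");
        }

        // New JDBC 4.0 signatures: declared so the class compiles against the
        // 4.0 interface, but unusable until a real implementation lands.
        public Reader getNCharacterStream(int columnIndex) throws SQLException {
            throw notImplemented("getNCharacterStream(int)");
        }

        public String getNString(String columnName) throws SQLException {
            throw notImplemented("getNString(String)");
        }

        public void updateNCharacterStream(int columnIndex, Reader x, int length)
                throws SQLException {
            throw notImplemented("updateNCharacterStream(int, Reader, int)");
        }
    }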
derby-DERBY-1183-c63d941b
DERBY-435 jdbcapi/checkDataSource.java and jdbcapi/checkDataSource30.java tests should be changed or extended to test client DataSources - Brings checkDataSource and checkDataSource30 into derbynetclientmats - Reenables tests for DERBY-1044 now that it is fixed. Tests are excluded for some bugs uncovered by this test that have yet to be resolved: DERBY-1035, DERBY-1047, DERBY-1025, DERBY-1148, DERBY-1183 git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@391902 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1183-d51b773e
DERBY-1036: Embedded and client differ with regard to Statement.setCursor Name. client will disallow duplicate cursor names. Embedded will allow it. and DERBY-1183: Client java.sql.ResultSet.getCursorName() does not return the correct cursor name for Statements after the first execution Patch description: The problem underlying DERBY-1183 is the following: A Statement (and by inheritance, PreparedStatement and CallableStatement) will have a cursor name associated with result set generated by the statement. Such names are either set by the user (Statement#setCursorName) or assigned by the system (aka "canned" cursor names in the client driver). In either case, the user can get at the cursor name attached to a statement by calling Resultset#getCursorName. This string can be used to construct the SQL text of positioned update or delete statement. Currently, when a cursor name is set on a statement, the following actions are taken (Statement#setCursorName): 1. The statement's string variable cursorName_ is set to the new string. This keeps track of the user cursor name set for this statement. 2. A cache (clientCursorNameCache) of cursor names set by the user for this connection is maintained to avoid having any two identical cursor name for the same statement/result set. When the cursor name is set, the check is performed (2a) and the cache is updated with the new cursor name if all is well (2b). If the statement (only applies for PreparedStatement and CallableStatement) already has a DRDA section (ch 7.14) allocated, the following is also performed: 3. A map (cursor name -> section) is updated. 4. The section associated with this statement gets its variable clientCursorName set to the supplied cursor name. For Statement, this association (steps 3 and 4) is performed at statement execution time (inside flowExecute, just after the new section is allocated for a query). 5. Also in flowExecute (of Statement, PreparedStatement) the cursor (user set or canned) is associated with the result set (call to mapCursorNameToResultSet). The symptom of this bug is that the cursor name set for a statement seems to be lost after one execution of that statement, reverting to a canned cursor name for executions 2..n. This happens because the cursor name is actually reset as part of the next execute on a statement object: 6. Open result sets for a statement are closed when flowExecute calls readCloseResultSets. During the closing of a result set, ResultSet#markClosed calls statement_.removeClientCursorNameFromCache, which in addition to removing the user cursor name from the cache also sets the statements cursorName_ to null, causing it effectively to be forgotten. Since #5 happens *after* #6 in flowExecute, the execution still works, but with a canned cursor instead of the user named cursor (see mapCursorNameToResultSet). This "race condition" suggests deferring the setup of mappings and insertion into the name cache till execution time (*after* the closing of open result sets on this statement). When studying this I found there was already a bug filed (DERBY-1036) which advises that we defer the check of duplicates till execution time, so this patch fixes both DERBY-1183 and DERBY-1036. In the patch, Statement#setCursorName now only sets the string variable cursorName_. The patch also removes the resetting of cursorName_ in removeClientCursorNameFromCache, so it won't be forgotten. It is still removed from the cache when the result set is closed (and current maps deleted). 
Furthermore, it moves #2, #3 and #4 till execution time (flowExecute). Item #2a is performed at the start of flowExecute, so we can avoid starting any protocol action if it turns out that we should not start execution (duplicate name). The actions needed in Statement#flowExecute and PreparedStatement#flowExecute are similar, so I factored these out into the methods Statement# checkForDuplicateCursorName and Statement#setupCursorNameCacheAndMappings. Next, I removed code from getCursorName which redundantly performs #3 (now always handled at execute time). CAVEAT: When working on understanding what goes on here, I found that the Statement finalizer can lead to cursors being released; this is part of the client side clean-up for statements that are not explicitly closed (DERBY-210). This can sometimes lead to time variability in canned cursor names in client driver tests which do not close statements explicitly. If the canned cursor name occurs in the canon file, we can see spurious diff due to this. I think we have seen this in the past. Avoid it by closing statements or by naming the cursors explicitly. Finally, I removed work-around code in jdbcapi/checkDataSource.java for derby-1036 and derby-1183 and updated masters to reflect that the cursor names are no longer forgotten. Patch submitted by Dag H. Wanvik. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@416661 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/PreparedStatement.java", "hunks": [ { "added": [ " // DERBY-1036: Moved check till execute time to comply with embedded", " // behavior. Since we check here and not in setCursorName, several", " // statements can have the same cursor name as long as their result", " // sets are not simultaneously open.", "", " if (sqlMode_ == isQuery__) {", " checkForDuplicateCursorName();", " }", "" ], "header": "@@ -1588,6 +1588,15 @@ public class PreparedStatement extends Statement", "removed": [] } ] }, { "file": "java/client/org/apache/derby/client/am/Statement.java", "hunks": [ { "added": [ " // s.setCursorName()) adds a user-defined name, but it is not", " // added to the cursor map until execution time (DERBY-1036);", " // then is given the canned cursor name as defined by our jdbc package set." ], "header": "@@ -110,9 +110,10 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " // When set (s.setCursorName()) with a user-defined name, then it is added to the cursor map at that time;", " // then is given the canned cursor name as defined by our jdbc package set and added to the cursor map." ] }, { "added": [ " // DERBY-1036: Duplicate cursor names not allowed, check", " // deferred till execute time. " ], "header": "@@ -824,22 +825,9 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " // Duplicate cursor names not allowed.", " if (connection_.clientCursorNameCache_.containsKey(name)) {", " throw new SqlException(agent_.logWriter_, ", " new ClientMessageId(SQLState.CURSOR_DUPLICATE_NAME), name);", " }", " connection_.clientCursorNameCache_.put(name, name);", "", " // section_ is null for Statement objects. We will defer the mapping of cursorName", " // to section until when the query is executed.", " if (section_ != null) {", " agent_.sectionManager_.mapCursorNameToQuerySection(name, (Section) section_);", " // This means we must subtitute the <users-cursor-name> with the <canned-cursor-name>", " // in the pass-thru sql string \"...where current of <canned-cursor-name>\".", " section_.setClientCursorName(name);", " }" ] }, { "added": [ " // DERBY-1036: Moved check till execute time to comply with embedded", " // behavior. 
Since we check here and not in setCursorName, several", " // statements can have the same cursor name as long as their result", " // sets are not simultaneously open.", "", " if (sqlMode_ == isQuery__) {", " checkForDuplicateCursorName();", " }", "" ], "header": "@@ -1893,6 +1881,15 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [] }, { "added": [], "header": "@@ -1912,16 +1909,6 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " // if client's cursor name is set, map it to the query section in the hashtable", " // after we obtain the section.", " if (cursorName_ != null) {", " agent_.sectionManager_.mapCursorNameToQuerySection(cursorName_, newSection);", "", " // This means we must subtitute the <users-cursor-name> with the <canned-cursor-name>", " // in the pass-thru sql string \"...where current of <canned-cursor-name>\".", " newSection.setClientCursorName(cursorName_);", " }", "" ] }, { "added": [ "", " // DERBY-1183: If we set it up it earlier, the entry in", " // clientCursorNameCache_ gets wiped out by the closing of", " // result sets happening during readCloseResultSets above", " // because ResultSet#markClosed calls", " // Statement#removeClientCursorNameFromCache.", " setupCursorNameCacheAndMappings();" ], "header": "@@ -2028,9 +2015,13 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " // If client's cursor name is set, map the client's cursor name to the ResultSet", " // Else map the server's cursor name to the ResultSet", " mapCursorNameToResultSet();" ] }, { "added": [ " // Two open result sets can not have the same cursor name. ", " protected void checkForDuplicateCursorName() throws SqlException {", " if (cursorName_ != null && (connection_.clientCursorNameCache_.", " containsKey(cursorName_))) {", " throw new SqlException", " (agent_.logWriter_, ", " new ClientMessageId(SQLState.CURSOR_DUPLICATE_NAME), ", " cursorName_);", " }", " }", "", "", " // Set up information to be able to handle cursor names:", " // canned or user named (via setCursorName).", " protected void setupCursorNameCacheAndMappings() {", " if (cursorName_ != null) {", " // The user has set a cursor name for this statement.", " // This means we must subtitute the <users-cursor-name>", " // with the <canned-cursor-name> in the pass-thru sql", " // string \"...where current of <canned-cursor-name>\"", " // whenever the result set produced by this statement", " // is referenced in a positioned update/delete statement.", " agent_.sectionManager_.mapCursorNameToQuerySection", " (cursorName_, section_);", " section_.setClientCursorName(cursorName_);", " ", " // Update cache to avoid duplicates of user set cursor name.", " connection_.clientCursorNameCache_.put(cursorName_, ", " cursorName_);", " } else {", "\t // canned cursor name", "\t agent_.sectionManager_.mapCursorNameToQuerySection", " (section_.getServerCursorName(), section_);", "\t}", "", " // If client's cursor name is set, map the client's cursor name to the", " // result set, else map the server's cursor name to the result set.", " mapCursorNameToResultSet();", " }", "", "" ], "header": "@@ -2411,6 +2402,47 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [] } ] } ]
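The DERBY-1183/DERBY-1036 entry above moves both the duplicate-cursor-name check and the name-to-section mapping from setCursorName() to execute time, so that closing the previous result set (which evicts the name from the per-connection cache) happens before the name is registered again. The sketch below keeps only that ordering; the field and method names are illustrative rather than the client driver's, and the cache lives on the demo object instead of the connection for brevity.

    import java.sql.SQLException;
    import java.util.HashMap;
    import java.util.Map;

    public class DeferredCursorNameCheck {
        // In the real driver this cache is per connection and shared by all
        // statements; it tracks cursor names that have an open result set.
        private final Map<String, String> openCursorNames = new HashMap<String, String>();

        private String cursorName;   // set by the application, may stay null

        public void setCursorName(String name) {
            cursorName = name;        // record only; no cache update any more
        }

        public void execute() throws SQLException {
            closePreviousResultSet();                 // evicts our old name first
            if (cursorName != null && openCursorNames.containsKey(cursorName)) {
                throw new SQLException("Duplicate cursor name: " + cursorName);
            }
            if (cursorName != null) {
                openCursorNames.put(cursorName, cursorName);   // register for this execution
            }
            // ... run the query and map the name to the new result set ...
        }

        private void closePreviousResultSet() {
            if (cursorName != null) {
                openCursorNames.remove(cursorName);
            }
        }
    }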
derby-DERBY-1184-b94443fd
DERBY-1184: registerOutParameter(int,int,String) should throw exception Patch contributed by Kristian Waagan (Kristian.Waagan@Sun.com) The method 'CallableStatement.registerOutParameter(int,int,String)' does nothing in the client driver. As stated in DERBY-447, the method throws a not-implemented exception in the embedded driver. The method should be changed to do this on the client side as well. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@392304 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1187-b8ddf447
DERBY-1187 Defragment pass was not being committed properly, so when rows at front of table were being deleted and freeing pages the defragment pass did not move the rows to the free pages. Longer term the defragment pass should commit more often, see DERBY-1188. Added extra test to OnlineCompressTest.java for this case. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@392439 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1188-b8ddf447
DERBY-1187 Defragment pass was not being committed properly, so when rows at front of table were being deleted and freeing pages the defragment pass did not move the rows to the free pages. Longer term the defragment pass should commit more often, see DERBY-1188. Added extra test to OnlineCompressTest.java for this case. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@392439 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1191-60d10e68
DERBY-1191 (partial) Some SQLExceptions, for example those generated from BrokeredStatements, do not print to derby.log even when derby.stream.error.logSeverityLevel=0 Here is a patch that takes the approach of adding a public static void logAndThrowSQLException(SQLException se) method and then calling that instead of just throwing the exception. The initial patch only uses the method for EmbedConnection.checkForTransactionInProgress() which is the most important exception to log after the fix for DERBY-3319. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@803948 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/jdbc/Util.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.error.ErrorStringBuilder;", "import org.apache.derby.iapi.services.property.PropertyUtil;", "import org.apache.derby.iapi.services.stream.HeaderPrintWriter;", "import org.apache.derby.iapi.services.monitor.Monitor;", "import org.apache.derby.iapi.reference.Property;" ], "header": "@@ -21,15 +21,20 @@", "removed": [] }, { "added": [ "", "\tprivate static int logSeverityLevel = PropertyUtil.getSystemInt(Property.LOG_SEVERITY_LEVEL,", "\t\tSanityManager.DEBUG ? 0 : ExceptionSeverity.SESSION_SEVERITY);" ], "header": "@@ -68,6 +73,9 @@ public abstract class Util {", "removed": [] }, { "added": [ " * Log SQLException to the error log if the severity exceeds the ", " * logSeverityLevel and then throw it. This method can be used for ", " * logging JDBC exceptions to derby.log DERBY-1191.", " * ", " * @param se SQLException to log and throw", " * @throws SQLException", " */", " public static void logAndThrowSQLException(SQLException se) throws SQLException {", " \tif (se.getErrorCode() >= logSeverityLevel){", " \tlogSQLException(se);", " \t}", " \tthrow se;", " }", " ", "\t/**", "\t * Log an SQLException to the error log or to the console if there is no", "\t * error log available.", "\t * This method could perhaps be optimized to have a static shared", "\t * ErrorStringBuilder and synchronize the method, but this works for now.", "\t * ", "\t * @param se SQLException to log", "\t */", "\tprivate static void logSQLException(SQLException se) {", " \tif (se == null)", " \t\treturn;", " \tString message = se.getMessage();", " \tString sqlstate = se.getSQLState();", " \tif ((sqlstate != null) && (sqlstate.equals(SQLState.LOGIN_FAILED)) && ", " \t\t\t(message != null) && (message.equals(\"Connection refused : java.lang.OutOfMemoryError\")))\t\t\t\t", " \t\treturn;", "", " \tHeaderPrintWriter errorStream = Monitor.getStream();", " \tif (errorStream == null) {", " \t\tse.printStackTrace();", " \t\treturn;", " \t}", " \tErrorStringBuilder\terrorStringBuilder = new ErrorStringBuilder(errorStream.getHeader());", " \terrorStringBuilder.append(\"\\nERROR \" + se.getSQLState() + \": \" + se.getMessage() + \"\\n\");", " \terrorStringBuilder.stackTrace(se);", " \terrorStream.print(errorStringBuilder.get().toString());", " \terrorStream.flush();", " \terrorStringBuilder.reset();", "", " }", "", "\t", "\t/**" ], "header": "@@ -75,6 +83,53 @@ public abstract class Util {", "removed": [] } ] } ]
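The DERBY-1191 entry above adds a log-then-throw helper: when the severity carried in SQLException.getErrorCode() reaches the configured derby.stream.error.logSeverityLevel, the exception is written to derby.log before being rethrown. The sketch below keeps only that gating logic; it prints to System.err instead of Derby's HeaderPrintWriter, and the 40000 default is an assumed stand-in for the session-severity default the patch uses.

    import java.sql.SQLException;

    public class LogAndThrow {

        // Severity threshold; exceptions at or above it are logged before rethrow.
        private static final int LOG_SEVERITY_LEVEL =
                Integer.getInteger("derby.stream.error.logSeverityLevel", 40000);

        public static void logAndThrowSQLException(SQLException se) throws SQLException {
            // Derby encodes the severity of the error in getErrorCode().
            if (se.getErrorCode() >= LOG_SEVERITY_LEVEL) {
                se.printStackTrace(System.err);   // derby.log in the real helper
            }
            throw se;
        }

        public static void main(String[] args) {
            try {
                logAndThrowSQLException(new SQLException("demo", "25001", 40000));
            } catch (SQLException expected) {
                // logged above, then rethrown and caught here
            }
        }
    }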
derby-DERBY-1196-b01ccf59
DERBY-1196 Network server closes prepared statements prematurely if exception occurs during OPNQRY - Takes out close of prepared statement on exception in OPNQRY in DRDAConnThread processCommands - Updates the JCC master for derbynet/setTransactionIsolation. This test was getting Statement already closed errors for valid statements with JCC that are no longer in the master. - Enables test for DERBY-1047 in checkDataSource (Note this case actually passes without the fix because of the fix for DERBY-1158) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@392781 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1198-5d2e2bd2
DERBY-843: Internationalize SectionManager to XaException in client/am package. Also cleaned up any remaining hardcoded text in this package. Also fixed DERBY-1198, where jdbc4/StatementTest.java was using SQLState directly. Passes derbynetclientmats on jdk14 and jdbc40 on jdk16, and fixed output files for jdk13 and jdk16 for those tests whose output files were affected in the derbynetclientmats run. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@395484 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/build/org/apache/derbyBuild/splitmessages.java", "hunks": [ { "added": [], "header": "@@ -67,7 +67,6 @@ public class splitmessages {", "removed": [ " clientMessageIds.add(\"J104\");" ] }, { "added": [ " clientMessageIds.add(SQLState.LANG_INVALID_CALL_TO_EXECUTE_UPDATE);", " clientMessageIds.add(SQLState.LANG_CANT_INVALIDATE_OPEN_RESULT_SET);", " clientMessageIds.add(SQLState.YEAR_EXCEEDS_MAXIMUM);", " clientMessageIds.add(SQLState.CONNECTION_FAILED_ON_RESET);", " clientMessageIds.add(SQLState.DECIMAL_TOO_MANY_DIGITS);", " clientMessageIds.add(SQLState.NUMERIC_OVERFLOW);", " clientMessageIds.add(SQLState.UNSUPPORTED_HOLDABILITY_PROPERTY);", " clientMessageIds.add(SQLState.CANCEL_NOT_SUPPORTED_BY_SERVER);", " clientMessageIds.add(SQLState.LANG_INVALID_CALL_STATEMENT);", " clientMessageIds.add(SQLState.LOSS_OF_PRECISION_EXCEPTION);", " clientMessageIds.add(SQLState.LANG_INVALID_SQL_IN_BATCH);" ], "header": "@@ -84,10 +83,21 @@ public class splitmessages {", "removed": [] }, { "added": [ " * We assume all message ids starting with \"XJ\" or \"J\" are client messages", " * of explicitly adding each XJ or J shared message, and covers 90% of the" ], "header": "@@ -191,9 +201,9 @@ public class splitmessages {", "removed": [ " * We assume all message ids starting with \"XJ\" are client messages", " * of explicitly adding each XJ shared message, and covers 90% of the" ] } ] }, { "file": "java/client/org/apache/derby/client/am/Agent.java", "hunks": [ { "added": [ "import org.apache.derby.shared.common.reference.SQLState;", "" ], "header": "@@ -20,6 +20,8 @@", "removed": [] } ] }, { "file": "java/client/org/apache/derby/client/am/Connection.java", "hunks": [ { "added": [ " DisconnectException de = new DisconnectException(agent_, ", " new MessageId(SQLState.CONNECTION_FAILED_ON_RESET));" ], "header": "@@ -1835,7 +1835,8 @@ public abstract class Connection implements java.sql.Connection,", "removed": [ " DisconnectException de = new DisconnectException(agent_, \"An error occurred during connect reset and the connection has been terminated. See chained exceptions for details.\");" ] } ] }, { "file": "java/client/org/apache/derby/client/am/DatabaseMetaData.java", "hunks": [ { "added": [ " new MessageId(SQLState.STORED_PROC_NOT_INSTALLED));", " new MessageId(SQLState.STORED_PROC_LOAD_MODULE_NOT_FOUND));" ], "header": "@@ -2071,12 +2071,12 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " \"The required stored procedure is not installed on the server.\");", " \"The load module name for the stored procedure on the server is not found. 
\");" ] } ] }, { "file": "java/client/org/apache/derby/client/am/DateTime.java", "hunks": [ { "added": [ "import org.apache.derby.shared.common.i18n.MessageUtil;", "import org.apache.derby.shared.common.reference.SQLState;", "" ], "header": "@@ -19,6 +19,9 @@", "removed": [] }, { "added": [ " throw new java.lang.IllegalArgumentException(", " MessageUtil.getCompleteMessage(SQLState.LANG_FORMAT_EXCEPTION,", " SqlException.CLIENT_MESSAGE_RESOURCE_NAME,", " (Object[])null));" ], "header": "@@ -72,7 +75,10 @@ public class DateTime {", "removed": [ " throw new java.lang.IllegalArgumentException(\"Unsupported date format!\");" ] }, { "added": [ " java.sql.Date date) throws SqlException {", " throw new SqlException(null,", " new MessageId(SQLState.YEAR_EXCEEDS_MAXIMUM),", " new Integer(year), \"9999\");" ], "header": "@@ -224,10 +230,12 @@ public class DateTime {", "removed": [ " java.sql.Date date) throws ConversionException {", " throw new ConversionException(\"Year exceeds the maximum \\\"9999\\\".\");" ] }, { "added": [ " java.sql.Timestamp timestamp) throws SqlException {", " throw new SqlException(null,", " new MessageId(SQLState.YEAR_EXCEEDS_MAXIMUM),", " new Integer(year), \"9999\");" ], "header": "@@ -283,10 +291,12 @@ public class DateTime {", "removed": [ " java.sql.Timestamp timestamp) throws ConversionException {", " throw new ConversionException(\"Year exceeds the maximum \\\"9999\\\".\");" ] }, { "added": [ " java.sql.Timestamp timestamp) throws SqlException {", " throw new SqlException(null,", " new MessageId(SQLState.YEAR_EXCEEDS_MAXIMUM),", " new Integer(year), \"9999\");" ], "header": "@@ -544,10 +554,12 @@ public class DateTime {", "removed": [ " java.sql.Timestamp timestamp) throws ConversionException {", " throw new ConversionException(\"Year exceeds the maximum \\\"9999\\\".\");" ] } ] }, { "file": "java/client/org/apache/derby/client/am/Decimal.java", "hunks": [ { "added": [ "import org.apache.derby.shared.common.reference.SQLState;", "import org.apache.derby.shared.common.i18n.MessageUtil;", "" ], "header": "@@ -19,6 +19,9 @@", "removed": [] }, { "added": [ " ", " private static MessageUtil msgutil = new MessageUtil(", " SqlException.CLIENT_MESSAGE_RESOURCE_NAME);" ], "header": "@@ -28,6 +31,9 @@ public class Decimal {", "removed": [] }, { "added": [ " throw new java.lang.IllegalArgumentException(", " msgutil.getTextMessage(SQLState.DECIMAL_TOO_MANY_DIGITS));" ], "header": "@@ -243,7 +249,8 @@ public class Decimal {", "removed": [ " throw new java.lang.IllegalArgumentException(\"Decimal may only be up to 31 digits!\");" ] }, { "added": [ " throw new java.lang.IllegalArgumentException(", " msgutil.getTextMessage(SQLState.DECIMAL_TOO_MANY_DIGITS));" ], "header": "@@ -304,7 +311,8 @@ public class Decimal {", "removed": [ " throw new java.lang.IllegalArgumentException(\"Decimal may only be up to 31 digits!\");" ] }, { "added": [ " throw new java.lang.IllegalArgumentException(", " msgutil.getTextMessage(SQLState.DECIMAL_TOO_MANY_DIGITS));" ], "header": "@@ -319,7 +327,8 @@ public class Decimal {", "removed": [ " throw new java.lang.IllegalArgumentException(\"Decimal may only be up to 31 digits!\");" ] }, { "added": [ " throws SqlException {", " throw new SqlException(null,", " new MessageId(SQLState.DECIMAL_TOO_MANY_DIGITS));" ], "header": "@@ -361,10 +370,11 @@ public class Decimal {", "removed": [ " throws ConversionException {", " throw new ConversionException(\"Packed decimal may only be up to 31 digits!\");" ] }, { "added": [ " throw new SqlException(null,", " new 
MessageId(SQLState.LANG_OUTSIDE_RANGE_FOR_DATATYPE),", " \"packed decimal\", new SqlCode(-405));" ], "header": "@@ -374,11 +384,9 @@ public class Decimal {", "removed": [ " throw new ConversionException(\"The numeric literal \\\"\" +", " b.toString() +", " \"\\\" is not valid because its value is out of range.\",", " \"42820\",", " -405);" ] } ] }, { "file": "java/client/org/apache/derby/client/am/SectionManager.java", "hunks": [ { "added": [ "import org.apache.derby.shared.common.reference.SQLState;", "" ], "header": "@@ -22,6 +22,8 @@ package org.apache.derby.client.am;", "removed": [] }, { "added": [ " throw new SqlException(agent_.logWriter_,", " new MessageId(SQLState.UNSUPPORTED_HOLDABILITY_PROPERTY), ", " new Integer(resultSetHoldability));" ], "header": "@@ -116,7 +118,9 @@ public class SectionManager {", "removed": [ " throw new SqlException(agent_.logWriter_, \"resultSetHoldability property \" + resultSetHoldability + \" not supported\");" ] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.EXCEEDED_MAX_SECTIONS),", " \"32k\");" ], "header": "@@ -131,7 +135,9 @@ public class SectionManager {", "removed": [ " throw new SqlException(agent_.logWriter_, \"Run out of sections to use,sections limited to 32k currently\");" ] } ] }, { "file": "java/client/org/apache/derby/client/am/SqlCode.java", "hunks": [ { "added": [ " public SqlCode(int code) {" ], "header": "@@ -30,7 +30,7 @@ package org.apache.derby.client.am;", "removed": [ " private SqlCode(int code) {" ] } ] }, { "file": "java/client/org/apache/derby/client/am/SqlException.java", "hunks": [ { "added": [ " // Constants for message ids used in text we print out -- not used", " // in SqlExceptions", " public static final String CAUSED_BY_EXCEPTION_ID = \"J106\";", " public static final String BATCH_POSITION_ID = \"J107\";", " " ], "header": "@@ -86,6 +86,11 @@ public class SqlException extends Exception implements Diagnosable {", "removed": [] }, { "added": [ " ", " /**", " * Use this to override the standard error code that is derived", " * from the message severity", " */", " public SqlException(LogWriter logWriter, MessageId msgid, Object[] args,", " SqlCode sqlcode) {", " this(logWriter, msgid, args);", " this.errorcode_ = sqlcode.getCode();", " }", " ", " public SqlException(LogWriter logWriter, MessageId msgid, SqlCode sqlcode) {", " this(logWriter, msgid, (Object[])null, sqlcode);", " }", " ", " public SqlException(LogWriter logWriter, MessageId msgid, Object arg1,", " SqlCode sqlcode) {", " this(logWriter, msgid, new Object[] {arg1}, sqlcode);", " }", " ", " public SqlException(LogWriter logWriter, MessageId msgid, Object arg1,", " Object arg2, SqlCode sqlcode) {", " this(logWriter, msgid, new Object[] {arg1, arg2}, sqlcode);", " }" ], "header": "@@ -125,6 +130,30 @@ public class SqlException extends Exception implements Diagnosable {", "removed": [] }, { "added": [ " causeString_ = \" \" + ", " msgutil_.getTextMessage(CAUSED_BY_EXCEPTION_ID) + \" \" +" ], "header": "@@ -225,7 +254,8 @@ public class SqlException extends Exception implements Diagnosable {", "removed": [ " causeString_ = \" Caused by exception \" + " ] }, { "added": [ " batchPositionLabel_ = msgutil_.getTextMessage(BATCH_POSITION_ID) + ", " index + \": \";" ], "header": "@@ -331,7 +361,8 @@ public class SqlException extends Exception implements Diagnosable {", "removed": [ " batchPositionLabel_ = \"Error for batch element #\" + index + \": \";" ] }, { "added": [], "header": "@@ -466,31 +497,6 @@ public class SqlException 
extends Exception implements Diagnosable {", "removed": [ "// An intermediate exception encapsulation to provide code-reuse", "// for common ResultSet and ResultSetMetaData column access exceptions.", "", "class ColumnIndexOutOfBoundsException extends SqlException {", " ColumnIndexOutOfBoundsException(LogWriter logWriter, Throwable throwable, int resultSetColumn) {", " super(logWriter, throwable,", " \"Invalid argument:\" +", " \" Result column index \" + resultSetColumn + \" is out of range.\");", " }", "}", "", "// An intermediate exception encapsulation to provide code-reuse", "// for common ResultSet data conversion exceptions.", "", "class NumberFormatConversionException extends SqlException {", " NumberFormatConversionException(LogWriter logWriter, String instance) {", " super(logWriter,", " \"Invalid data conversion:\" +", " \" Result column instance \" +", " instance +", " \" is either an invalid numeric representation\" +", " \" or is out of range.\");", " }", "}", "" ] } ] }, { "file": "java/client/org/apache/derby/client/am/Statement.java", "hunks": [ { "added": [ "import org.apache.derby.shared.common.reference.SQLState;" ], "header": "@@ -22,6 +22,7 @@ package org.apache.derby.client.am;", "removed": [] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.INVALID_API_PARAMETER),", " new Integer(type), \"type\", \"createStatement()\");" ], "header": "@@ -320,8 +321,9 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"Invalid argument: \" +", " \"ResultSet Type \" + type + \" is invalid.\");" ] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.INVALID_API_PARAMETER),", " new Integer(concurrency), \"concurrency\",", " \"createStatement()\");" ], "header": "@@ -330,8 +332,10 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"Invalid argument: \" +", " \"ResultSet Concurrency \" + concurrency + \" is invalid.\");" ] }, { "added": [ " throw new SqlException(agent_.logWriter_,", " new MessageId(SQLState.INVALID_API_PARAMETER),", " new Integer(holdability), \"holdability\",", " \"createStatement()\");" ], "header": "@@ -340,8 +344,10 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"Invalid argument: \" +", " \"ResultSet holdability \" + holdability + \" is invalid.\");" ] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.INVALID_API_PARAMETER),", " new Integer(autoGeneratedKeys),", " \"autoGeneratedKeys\", \"createStatement\");" ], "header": "@@ -350,9 +356,10 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"Invalid argument: \" +", " \"Statement auto-generated keys value \" + autoGeneratedKeys +", " \" is invalid.\");" ] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.MULTIPLE_RESULTS_ON_EXECUTE_QUERY),", " jdbcStatementInterfaceName, jdbcStatementInterfaceName);", " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.USE_EXECUTE_UPDATE_WITH_NO_RESULTS),", " jdbcStatementInterfaceName, jdbcStatementInterfaceName);" ], "header": "@@ -408,15 +415,15 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ 
" throw new SqlException(agent_.logWriter_, jdbcStatementInterfaceName + \".executeQuery() cannot be called \" +", " \"because multiple result sets were returned.\" +", " \" Use \" + jdbcStatementInterfaceName + \".execute() to obtain multiple results.\");", " throw new SqlException(agent_.logWriter_, jdbcStatementInterfaceName + \".executeQuery() was called \" +", " \"but no result set was returned.\" +", " \" Use \" + jdbcStatementInterfaceName + \".executeUpdate() for non-queries.\");" ] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.MULTIPLE_RESULTS_ON_EXECUTE_QUERY),", " jdbcStatementInterfaceName, jdbcStatementInterfaceName);", " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.LANG_INVALID_CALL_TO_EXECUTE_UPDATE));" ], "header": "@@ -450,16 +457,15 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, jdbcStatementInterfaceName + \".executeUpdate() cannot be called \" +", " \"because multiple result sets returned.\" +", " \" Use \" + jdbcStatementInterfaceName + \".execute() to obtain multiple results.\");", " throw new SqlException(agent_.logWriter_, jdbcStatementInterfaceName + \".executeUpdate() was called \" +", " \"but a result set was returned.\" +", " \" Use \" + jdbcStatementInterfaceName + \".executeQuery() to obtain a result set.\");" ] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.INVALID_MAXFIELD_SIZE),", " new Integer(max));" ], "header": "@@ -560,7 +566,9 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"Invalid maxFieldSize value: \" + max);" ] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.INVALID_MAX_ROWS_VALUE),", " new Integer(maxRows));" ], "header": "@@ -595,7 +603,9 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"Invalid maxRows value: \" + maxRows);" ] }, { "added": [ " new MessageId(SQLState.INVALID_QUERYTIMEOUT_VALUE),", " new Integer(seconds));" ], "header": "@@ -647,8 +657,8 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " \"Attempt to set a negative query timeout\",", " \"XJ074.S\");" ] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.CANCEL_NOT_SUPPORTED_BY_SERVER));" ], "header": "@@ -669,7 +679,8 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"cancel() not supported by server\");" ] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.CURSOR_INVALID_NAME),", " name);", " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.LANG_CANT_INVALIDATE_OPEN_RESULT_SET),", " \"setCursorName()\", \"Statement\");", " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.CURSOR_DUPLICATE_NAME), name);" ], "header": "@@ -712,18 +723,22 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"Invalid cursor name.\");", " throw new SqlException(agent_.logWriter_, \"Invalid operation: setCursorName() \" +", " \"called when there are open ResultSet's on the Statement.\");", " throw new 
SqlException(agent_.logWriter_, \"Duplicate cursor names are not allowed.\");" ] }, { "added": [ " throw new SqlException(agent_.logWriter_,", " new MessageId(SQLState.INVALID_FETCH_DIRECTION),", " new Integer(direction));" ], "header": "@@ -850,7 +865,9 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"Invalid fetch direction \" + direction);" ] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.INVALID_ST_FETCH_SIZE),", " new Integer(rows)).getSQLException();" ], "header": "@@ -885,7 +902,9 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"Invalid fetch size \" + rows).getSQLException();" ] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.NOT_IMPLEMENTED),", " \"executeUpdate(String, int[])\");" ], "header": "@@ -1132,7 +1151,9 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"Driver not capable\");" ] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.NOT_IMPLEMENTED),", " \"execute(String, int[])\");" ], "header": "@@ -1189,7 +1210,9 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"Driver not capable\");" ] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.CURSOR_INVALID_NAME),", " cursorName);" ], "header": "@@ -1826,8 +1849,9 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"Invalid cursor name \\\"\" + cursorName +", " \"\\\" in the Update/Delete statement.\");" ] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.UNABLE_TO_OPEN_RS_WITH_REQUESTED_HOLDABILITY),", " new Integer(resultSetHoldability_));" ], "header": "@@ -1966,8 +1990,9 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"Unable to open resultSet with requested \" +", " \"holdability \" + resultSetHoldability_ + \".\");" ] }, { "added": [ " new MessageId(SQLState.BATCH_CHAIN_BREAKING_EXCEPTION)));" ], "header": "@@ -2065,8 +2090,7 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " \"Non-recoverable chain-breaking exception occurred during batch processing. 
\" +", " \"The batch is terminated non-atomically.\"));" ] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.NO_TOKENS_IN_SQL_TEXT), sql);" ], "header": "@@ -2172,7 +2196,8 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"SQL passed with no tokens\");" ] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.NO_TOKENS_IN_SQL_TEXT), sql);", " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.LANG_INVALID_CALL_STATEMENT));", " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.LANG_INVALID_CALL_STATEMENT));" ], "header": "@@ -2207,14 +2232,17 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"bugcheck\");", " throw new SqlException(agent_.logWriter_, \"bugcheck\");", " throw new SqlException(agent_.logWriter_, \"Invalid CALL syntax\");" ] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.CANT_USE_EXEC_QUERY_FOR_UPDATE));", " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.LANG_INVALID_CALL_TO_EXECUTE_UPDATE));" ], "header": "@@ -2244,10 +2272,12 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"executeQuery method cannot be used for update.\");", " throw new SqlException(agent_.logWriter_, \"executeUpdate method cannot be used for query.\");" ] }, { "added": [ " if ( this.connection_ == null || this.connection_.isClosed() )", " throw new SqlException(agent_.logWriter_,", " new MessageId(SQLState.NO_CURRENT_CONNECTION));", " ", " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.LANG_STATEMENT_CLOSED_NO_REASON));" ], "header": "@@ -2258,9 +2288,14 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"Invalid operation: statement closed\");" ] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.NULL_SQL_TEXT));" ], "header": "@@ -2269,7 +2304,8 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"Null batched SQL string passed.\");" ] }, { "added": [ " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.LANG_INVALID_SQL_IN_BATCH), sql);" ], "header": "@@ -2279,7 +2315,8 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"Invalid SQL in Batch\");" ] }, { "added": [ " throw new SqlException(agent_.logWriter_,", " new MessageId(SQLState.INVALID_API_PARAMETER),", " new Integer(autoGeneratedKeys_), \"autoGeneratedKeys\",", " \"Statement.execute()/executeQuery()\");", " throw new SqlException(agent_.logWriter_, ", " new MessageId(SQLState.NOT_IMPLEMENTED),", " \"Connection.prepareStatement(String sql, String[] columnNames)\");" ], "header": "@@ -2478,13 +2515,16 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " throw new SqlException(agent_.logWriter_, \"Invalid argument: \" +", " \"Statement auto-generated keys value \" + autoGeneratedKeys_ +", " \" is invalid.\");", " throw new SqlException(agent_.logWriter_, \"Driver not capable\");" ] } ] }, { "file": 
"java/client/org/apache/derby/client/am/Utils.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.types.SQLBit;", "import org.apache.derby.shared.common.i18n.MessageUtil;" ], "header": "@@ -21,6 +21,8 @@", "removed": [] } ] }, { "file": "java/client/org/apache/derby/client/am/Version.java", "hunks": [ { "added": [ "import org.apache.derby.shared.common.i18n.MessageUtil;", "", " static MessageUtil msgutil = ", " new MessageUtil(SqlException.CLIENT_MESSAGE_RESOURCE_NAME);", " ", " // Constants for internationalized message ids", " private static String SECURITY_MANAGER_NO_ACCESS_ID = \"J108\";", " private static String UNKNOWN_HOST_ID = \"J109\";", " " ], "header": "@@ -20,9 +20,18 @@", "removed": [] }, { "added": [ " printWriter.println(header + ", " msgutil.getTextMessage(SECURITY_MANAGER_NO_ACCESS_ID, property));" ], "header": "@@ -142,7 +151,8 @@ public abstract class Version {", "removed": [ " printWriter.println(header + \"Security manager does not permit access to system property \" + property);" ] } ] }, { "file": "java/client/org/apache/derby/client/net/Request.java", "hunks": [ { "added": [ " int length = org.apache.derby.client.am.Decimal.bigDecimalToPackedDecimalBytes(bytes_, offset_, v, declaredPrecision, declaredScale);", " offset_ += length;", " ensureLength(offset_ + 10);", " org.apache.derby.client.am.DateTime.dateToDateBytes(bytes_, offset_, date);", " offset_ += 10;" ], "header": "@@ -1518,24 +1518,14 @@ public class Request {", "removed": [ " try {", " int length = org.apache.derby.client.am.Decimal.bigDecimalToPackedDecimalBytes(bytes_, offset_, v, declaredPrecision, declaredScale);", " offset_ += length;", " } catch (org.apache.derby.client.am.ConversionException e) {", " throw new SqlException(netAgent_.logWriter_, e,", " \"BigDecimal conversion exception \" + e.getMessage() + \". See attached Throwable.\");", " }", " try {", " ensureLength(offset_ + 10);", " org.apache.derby.client.am.DateTime.dateToDateBytes(bytes_, offset_, date);", " offset_ += 10;", " } catch (org.apache.derby.client.am.ConversionException e) {", " throw new SqlException(netAgent_.logWriter_, e,", " \"Date conversion exception \" + e.getMessage() + \". 
See attached Throwable.\");", " }" ] } ] }, { "file": "java/shared/org/apache/derby/shared/common/reference/SQLState.java", "hunks": [ { "added": [ " String YEAR_EXCEEDS_MAXIMUM = \"22003.S.1\";", " String DECIMAL_TOO_MANY_DIGITS = \"22003.S.2\";", " String NUMERIC_OVERFLOW = \"22003.S.3\";" ], "header": "@@ -665,6 +665,9 @@ public interface SQLState {", "removed": [] }, { "added": [ " String LOSS_OF_PRECISION_EXCEPTION = \"22015.S.1\";" ], "header": "@@ -678,6 +681,7 @@ public interface SQLState {", "removed": [] }, { "added": [ " String LANG_INVALID_SQL_IN_BATCH = \"42ZA1\";" ], "header": "@@ -1005,6 +1009,7 @@ public interface SQLState {", "removed": [] }, { "added": [ " String UNSUPPORTED_HOLDABILITY_PROPERTY = \"0A000.S.3\";", " String CANCEL_NOT_SUPPORTED_BY_SERVER = \"0A000.S.4\";", "" ], "header": "@@ -1346,6 +1351,9 @@ public interface SQLState {", "removed": [] }, { "added": [ " ", " String EXCEEDED_MAX_SECTIONS = \"XJ200.S\";", " String MULTIPLE_RESULTS_ON_EXECUTE_QUERY = \"XJ201.S\";", " String CURSOR_INVALID_NAME = \"XJ202.S\";", " String CURSOR_DUPLICATE_NAME = \"XJ203.S\";", " String UNABLE_TO_OPEN_RS_WITH_REQUESTED_HOLDABILITY = \"XJ204.S\";", " String USE_EXECUTE_UPDATE_WITH_NO_RESULTS = \"XJ205.S\";", " String NO_TOKENS_IN_SQL_TEXT = \"XJ206.S\";", " String CANT_USE_EXEC_QUERY_FOR_UPDATE = \"XJ207.S\";", " String BATCH_NON_ATOMIC_FAILURE = \"XJ208.S\";", " String STORED_PROC_NOT_INSTALLED = \"XJ209.S\";", " String STORED_PROC_LOAD_MODULE_NOT_FOUND = \"XJ210.S\";", " String BATCH_CHAIN_BREAKING_EXCEPTION = \"XJ211.S\";", " " ], "header": "@@ -1485,7 +1493,20 @@ public interface SQLState {", "removed": [ "" ] } ] } ]
derby-DERBY-1205-e540aee4
DERBY-1007: Follow up patch to earlier submitted patch. In a word, the fix for this issue ensures that, in the case of subqueries, the optimizer will correctly propagate the estimated costs for subqueries up to the parent subquery(-ies), thus allowing the parent query to make a better decision about which join order is ultimately the best. As seen in the example scenario included above, the correct estimates are higher--sometimes much higher--than what the optimizer was returning prior to this change: in the example, the optimizer was returning an incorrect cost estimate of 10783 before the patch, and a correct estimate of 1 million after the patch (where "correct" means that it's the value calculated by the optimizer and thus the value that should be returned; I'm not saying anything about the accuracy of the estimate here). One side effect of this is that, for very deeply nested queries and/or queries with a high number of FROM tables/expressions, the higher cost estimates can be multiplied--sometimes many times over--throughout the optimization process, which means that the overall query estimate can climb to a much larger number much more quickly. If the query is big enough, this can actually cause the optimizer to reach an estimated cost of INFINITY. That said, the current optimizer logic for choosing a plan does not expect to see an estimate of infinity for its plans. As a result the optimizer does comparisons of, and arithmetic with, cost estimates and row counts that, when applied to Infinity, give unexpected results. I have filed DERBY-1259 and DERBY-1260 to address the "infinity problem" in more detail, but am attaching here a follow-up patch that takes some basic steps toward making the optimizer more robust in the face of infinite cost estimates, which are now more likely to occur given the DERBY-1007 changes. In particular, the d1007_followup_v1.patch does the following: 1) Fixes a couple of small problems with the handling of estimates for FromBaseTables, to ensure that a FromBaseTable's estimate is correctly propagated to (and handled by) the ProjectRestrictNode that sits above it. This parallels the original DERBY-1007 work but is a much simpler "follow-up" task as it deals only with base tables instead of subqueries, and thus the changes are fairly minor. 2) There are several places in OptimizerImpl where the optimizer will only choose to accept a plan's cost if the cost is less than the current "bestCost". If no best cost has been found yet, bestCost is set to an uninitialized value of Double.MAX_VALUE with the assumption that the first valid plan will have a cost less than Double.MAX_VALUE and thus will be chosen as the best so far. However, since a plan's cost estimate can actually end up being Double.POSITIVE_INFINITY, which is greater than Double.MAX_VALUE, it's possible that the optimizer will reject a valid join order because its cost is infinity, and then end up completing without ever finding a valid plan--which is wrong. What we want is for the optimizer to accept the first valid plan that it finds, regardless of what the cost is. Then if it later finds a better plan, it can use that. So in several places the d1007_followup_v1.patch adds a check to see if bestCost is uninitialized and, if so, we'll always accept the first valid join order we find, regardless of what its cost is--even if it's infinity--because that's better than no plan at all. 
3) Modifies the "compare" method in CostEstimateImpl.java to try to account for comparisons between two plans that both have infinite costs. If this happens, we don't have much choice but to guess as to which plan is actually better. So the changes for followup_v1 make that guess based on a comparison of row counts for the two plans. And if the row counts themselves are infinity, then we'll guess based on the single scan row counts. And finally, if those values are both infinity, as well, then we're out of luck and we just say that the two costs are "equal" for lack of better alternative. 4) And finally, due to unexpected behavior that results from arithmetic using infinity (see DERBY-1259), it is currently possible (though rather rare) for the optimizer to decide to do a hash join that has a cost estimate of Infinity. An example of a query for which this could happen can be found in DERBY-1205, query #1. That said, the BackingStoreHashtable that is used for carrying out a hash join currently creates a Java Hashtable instance with a capacity that matches the optimizer's estimated row count. So if the row count is infinity we'll try to create a Hashtable with some impossibly large capacity and, as a result, we'll end up with an OutOfMemory error. So the d1007_followup_v1.patch adds some code to handle this kind of situation in a more graceful manner. I ran derbyall with these changes on Linux Red Hat using ibm142 and saw no new failures. Submitted by Army Brown (qozinx@gmail.com) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@397675 13f79535-47bb-0310-9956-ffa450edef68
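To illustrate item 3 above, here is a minimal, standalone sketch of an infinity-tolerant cost comparison. This is not Derby's actual CostEstimateImpl code; the class and field names are made up for illustration, but the fallback order (cost, then row count, then single-scan row count) follows the description in the message.

// Illustrative sketch only -- not Derby's CostEstimateImpl.
class CostSketch {
    double estimatedCost;
    double rowCount;
    double singleScanRowCount;

    CostSketch(double estimatedCost, double rowCount, double singleScanRowCount) {
        this.estimatedCost = estimatedCost;
        this.rowCount = rowCount;
        this.singleScanRowCount = singleScanRowCount;
    }

    /** Negative if this plan looks cheaper, positive if the other does, 0 for a tie. */
    double compare(CostSketch other) {
        if (Double.isInfinite(estimatedCost) && Double.isInfinite(other.estimatedCost)) {
            // Both costs are infinite, so they tell us nothing: guess from row counts.
            if (!(Double.isInfinite(rowCount) && Double.isInfinite(other.rowCount))) {
                return rowCount - other.rowCount;
            }
            // Row counts are infinite too: fall back to single-scan row counts.
            if (!(Double.isInfinite(singleScanRowCount)
                    && Double.isInfinite(other.singleScanRowCount))) {
                return singleScanRowCount - other.singleScanRowCount;
            }
            // Everything is infinite: treat the two plans as equally costly.
            return 0.0d;
        }
        return estimatedCost - other.estimatedCost;
    }
}

If both plans report infinite values all the way down, the sketch simply reports a tie, which matches the "equal for lack of better alternative" behavior described above.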
[ { "file": "java/engine/org/apache/derby/iapi/store/access/BackingStoreHashtable.java", "hunks": [ { "added": [ " /* We want to create the hash table based on the estimated row", " * count if a) we have an estimated row count (i.e. it's greater", " * than zero) and b) we think we can create a hash table to", " * hold the estimated row count without running out of memory.", " * The check for \"b\" is required because, for deeply nested", " * queries and/or queries with a high number of tables in", " * their FROM lists, the optimizer can end up calculating", " * some very high row count estimates--even up to the point of", " * Double.POSITIVE_INFINITY (see DERBY-1259 for an explanation", " * of how that can happen). In that case any attempts to", " * create a Hashtable of size estimated_rowcnt can cause", " * OutOfMemory errors when we try to create the Hashtable.", " * So as a \"red flag\" for that kind of situation, we check to", " * see if the estimated row count is greater than the max", " * in-memory size for this table. Unit-wise this comparison", " * is relatively meaningless: rows vs bytes. But if our", " * estimated row count is greater than the max number of", " * in-memory bytes that we're allowed to consume, then", " * it's very likely that creating a Hashtable with a capacity", " * of estimated_rowcnt will lead to memory problems. So in", " * that particular case we leave hash_table null here and", " * initialize it further below, using the estimated in-memory", " * size of the first row to figure out what a reasonable size", " * for the Hashtable might be.", " */", " (((estimated_rowcnt <= 0) || (row_source == null)) ?", " new Hashtable() :", " (estimated_rowcnt < max_inmemory_size) ?", " new Hashtable((int) estimated_rowcnt) :", " null);" ], "header": "@@ -224,9 +224,37 @@ public class BackingStoreHashtable", "removed": [ " ((estimated_rowcnt <= 0) ? ", " new Hashtable() : new Hashtable((int) estimated_rowcnt));" ] }, { "added": [ " // If we haven't initialized the hash_table yet then that's", " // because a Hashtable with capacity estimated_rowcnt would", " // probably cause memory problems. So look at the first row", " // that we found and use that to create the hash table with", " // an initial capacity such that, if it was completely full,", " // it would still satisfy the max_inmemory condition. 
Note", " // that this isn't a hard limit--the hash table can grow if", " // needed.", " if (hash_table == null)", " {", "\t\t\t\t\t// Check to see how much memory we think the first row", " // is going to take, and then use that to set the initial", " // capacity of the Hashtable.", " double rowUsage = getEstimatedMemUsage(row);", " hash_table = new Hashtable((int)(max_inmemory_size / rowUsage));", " }" ], "header": "@@ -235,6 +263,22 @@ public class BackingStoreHashtable", "removed": [] }, { "added": [ " max_inmemory_size -= getEstimatedMemUsage(row);" ], "header": "@@ -387,13 +431,7 @@ public class BackingStoreHashtable", "removed": [ " for( int i = 0; i < row.length; i++)", " {", " if( row[i] instanceof DataValueDescriptor)", " max_inmemory_size -= ((DataValueDescriptor) row[i]).estimateMemoryUsage();", " max_inmemory_size -= ClassSize.refSize;", " }", " max_inmemory_size -= ClassSize.refSize;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/OptimizerImpl.java", "hunks": [ { "added": [ "\t\t\t\t**", "\t\t\t\t** For very deeply nested queries, it's possible that the optimizer", "\t\t\t\t** will return an estimated cost of Double.INFINITY, which is", "\t\t\t\t** greater than our uninitialized cost of Double.MAX_VALUE and", "\t\t\t\t** thus the \"compare\" check below will return false. So we have", "\t\t\t\t** to check to see if bestCost is uninitialized and, if so, we", "\t\t\t\t** save currentCost regardless of what value it is--because we", "\t\t\t\t** haven't found anything better yet.", "\t\t\t\t**", "\t\t\t\t** That said, it's also possible for bestCost to be infinity", "\t\t\t\t** AND for current cost to be infinity, as well. In that case", "\t\t\t\t** we can't really tell much by comparing the two, so for lack", "\t\t\t\t** of better alternative we look at the row counts. See", "\t\t\t\t** CostEstimateImpl.compare() for more.", "\t\t\t\tif ((! foundABestPlan) ||", "\t\t\t\t\t(currentCost.compare(bestCost) < 0) ||", "\t\t\t\t\tbestCost.isUninitialized())" ], "header": "@@ -1368,8 +1368,24 @@ public class OptimizerImpl implements Optimizer", "removed": [ "\t\t\t\tif ((! foundABestPlan) || currentCost.compare(bestCost) < 0)" ] }, { "added": [ "\t\t\t\t\t\tif ((currentSortAvoidanceCost.compare(bestCost) <= 0)", "\t\t\t\t\t\t\t|| bestCost.isUninitialized())" ], "header": "@@ -1414,7 +1430,8 @@ public class OptimizerImpl implements Optimizer", "removed": [ "\t\t\t\t\t\tif (currentSortAvoidanceCost.compare(bestCost) <= 0)" ] }, { "added": [ "\t\t// Before considering the cost, make sure we set the optimizable's", "\t\t// \"current\" cost to be the one that we found. Doing this allows", "\t\t// us to compare \"current\" with \"best\" later on to find out if", "\t\t// the \"current\" plan is also the \"best\" one this round--if it's", "\t\t// not then we'll have to revert back to whatever the best plan is.", "\t\t// That check is performed in getNextDecoratedPermutation() of", "\t\t// this class.", "\t\toptimizable.getCurrentAccessPath().setCostEstimate(estimatedCost);", "" ], "header": "@@ -1776,6 +1793,15 @@ public class OptimizerImpl implements Optimizer", "removed": [] }, { "added": [ "\t\t// RESOLVE: The following call to memoryUsageOK does not behave", "\t\t// correctly if outerCost.rowCount() is POSITIVE_INFINITY; see", "\t\t// DERBY-1259." 
], "header": "@@ -1783,6 +1809,9 @@ public class OptimizerImpl implements Optimizer", "removed": [] }, { "added": [ "\t\t\tbestCostEstimate.isUninitialized() ||" ], "header": "@@ -1797,6 +1826,7 @@ public class OptimizerImpl implements Optimizer", "removed": [] }, { "added": [ "\t\t\t\t\t\tbestCostEstimate.isUninitialized() ||" ], "header": "@@ -1844,6 +1874,7 @@ public class OptimizerImpl implements Optimizer", "removed": [] }, { "added": [ "", " // RESOLVE: The following call to memoryUsageOK does not behave", " // correctly if outerCost.rowCount() is POSITIVE_INFINITY; see", " // DERBY-1259." ], "header": "@@ -1912,6 +1943,10 @@ public class OptimizerImpl implements Optimizer", "removed": [] }, { "added": [ "\t\t\tbestCostEstimate.isUninitialized() ||" ], "header": "@@ -1935,6 +1970,7 @@ public class OptimizerImpl implements Optimizer", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ProjectRestrictNode.java", "hunks": [ { "added": [ "", "\t\t\t// Note: we don't call \"optimizer.considerCost()\" here because", "\t\t\t// a) the child will make that call as part of its own", "\t\t\t// \"optimizeIt()\" work above, and b) the child might have", "\t\t\t// different criteria for \"considering\" (i.e. rejecting or", "\t\t\t// accepting) a plan's cost than this ProjectRestrictNode does--", "\t\t\t// and we don't want to override the child's decision. So as", "\t\t\t// with most operations in this class, if the child is an", "\t\t\t// Optimizable, we just let it do its own work and make its", "\t\t\t// own decisions." ], "header": "@@ -324,7 +324,16 @@ public class ProjectRestrictNode extends SingleChildResultSetNode", "removed": [ "\t\t\toptimizer.considerCost(this, restrictionList, getCostEstimate(), outerCost);" ] } ] } ]
derby-DERBY-1214-bb7eb4d3
DERBY-1214: Anurag's derby-1214_2.diff patch. This forwards new JDBC4 calls on Pooled and XA objects to the appropriate worker objects. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@408733 13f79535-47bb-0310-9956-ffa450edef68
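The diff below repeats one small delegation pattern many times: forward the new JDBC 4 call to the real (worker) connection, notify the brokering layer of any failure, then rethrow so the caller still sees the original exception. A minimal sketch of that shape, with illustrative names (only createNClob() is shown; the other forwarded methods follow the same template):

// Illustrative sketch of the forwarding pattern used in the diff below.
import java.sql.Connection;
import java.sql.NClob;
import java.sql.SQLException;

abstract class ForwardingConnectionSketch {
    /** The underlying "worker" connection this wrapper delegates to. */
    protected abstract Connection getRealConnection() throws SQLException;

    /** Hook for the pooling/brokering layer to react to a failure. */
    protected abstract void notifyException(SQLException sqle);

    public NClob createNClob() throws SQLException {
        try {
            return getRealConnection().createNClob();
        } catch (SQLException sqle) {
            notifyException(sqle);   // let the broker/pool react to the failure
            throw sqle;              // the caller still receives the original exception
        }
    }
}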
[ { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredConnection40.java", "hunks": [ { "added": [ " throws SQLException { ", " try {", " return getRealConnection().createArray (typeName, elements);", " } catch (SQLException sqle) {", " notifyException(sqle);", " throw sqle;", " }" ], "header": "@@ -43,8 +43,13 @@ public class BrokeredConnection40 extends BrokeredConnection30 {", "removed": [ " throws SQLException {", " throw Util.notImplemented();" ] }, { "added": [ " try {", " return getRealConnection().createNClob();", " } catch (SQLException sqle) {", " notifyException(sqle);", " throw sqle;", " }", " try {", " return getRealConnection().createSQLXML ();", " } catch (SQLException sqle) {", " notifyException(sqle);", " throw sqle;", " }", " throws SQLException {", " try {", " return getRealConnection().createStruct (typeName, attributes);", " } catch (SQLException sqle) {", " notifyException(sqle);", " throw sqle;", " }" ], "header": "@@ -101,16 +106,31 @@ public class BrokeredConnection40 extends BrokeredConnection30 {", "removed": [ " throw Util.notImplemented();", " throw Util.notImplemented();", " throws SQLException {", " throw Util.notImplemented();" ] }, { "added": [ " public final BrokeredStatement newBrokeredStatement", " try {", " return new BrokeredStatement40(statementControl, getJDBCLevel());", " } catch (SQLException sqle) {", " notifyException(sqle);", " throw sqle;", " }", " public final BrokeredPreparedStatement newBrokeredStatement(BrokeredStatementControl statementControl, String sql, Object generatedKeys) throws SQLException {", " try {", " return new BrokeredPreparedStatement40(statementControl, getJDBCLevel(), sql, generatedKeys);", " } catch (SQLException sqle) {", " notifyException(sqle);", " throw sqle;", " }", " public final BrokeredCallableStatement newBrokeredStatement(BrokeredStatementControl statementControl, String sql) throws SQLException {", " try {", " return new BrokeredCallableStatement40(statementControl, getJDBCLevel(), sql);", " } catch (SQLException sqle) {", " notifyException(sqle);", " throw sqle;", " }" ], "header": "@@ -249,15 +269,30 @@ public class BrokeredConnection40 extends BrokeredConnection30 {", "removed": [ " public BrokeredStatement newBrokeredStatement", "\t\treturn new BrokeredStatement40(statementControl, getJDBCLevel());", " public BrokeredPreparedStatement newBrokeredStatement(BrokeredStatementControl statementControl, String sql, Object generatedKeys) throws SQLException {", " return new BrokeredPreparedStatement40(statementControl, getJDBCLevel(), sql, generatedKeys);", " public BrokeredCallableStatement newBrokeredStatement(BrokeredStatementControl statementControl, String sql) throws SQLException {", " return new BrokeredCallableStatement40(statementControl, getJDBCLevel(), sql);" ] }, { "added": [ " public final java.util.Map<String,Class<?>> getTypeMap() throws SQLException {" ], "header": "@@ -266,7 +301,7 @@ public class BrokeredConnection40 extends BrokeredConnection30 {", "removed": [ " public java.util.Map<String,Class<?>> getTypeMap() throws SQLException {" ] }, { "added": [ " final int getJDBCLevel() { return 4;}" ], "header": "@@ -275,7 +310,7 @@ public class BrokeredConnection40 extends BrokeredConnection30 {", "removed": [ " int getJDBCLevel() { return 4;}" ] }, { "added": [ " public final boolean isWrapperFor(Class<?> interfaces) throws SQLException {" ], "header": "@@ -288,7 +323,7 @@ public class BrokeredConnection40 extends BrokeredConnection30 {", "removed": [ " public boolean isWrapperFor(Class<?> 
interfaces) throws SQLException {" ] }, { "added": [ " public final <T> T unwrap(java.lang.Class<T> interfaces) " ], "header": "@@ -301,7 +336,7 @@ public class BrokeredConnection40 extends BrokeredConnection30 {", "removed": [ " public <T> T unwrap(java.lang.Class<T> interfaces) " ] } ] }, { "file": "java/testing/org/apache/derbyTesting/functionTests/harness/RunList.java", "hunks": [ { "added": [ " static String junitxasingle;// Run junit test cases with under ", " // single branck xa transaction" ], "header": "@@ -119,6 +119,8 @@ public class RunList", "removed": [] }, { "added": [ " ", " if (junitxasingle != null)", " jvmProps.addElement (\"junit.xa.single=\" + junitxasingle);" ], "header": "@@ -440,6 +442,9 @@ public class RunList", "removed": [] } ] }, { "file": "java/testing/org/apache/derbyTesting/functionTests/util/TestConfiguration.java", "hunks": [ { "added": [ " singleLegXA = Boolean.valueOf(props.getProperty(KEY_SINGLE_LEG_XA)", " ).booleanValue();" ], "header": "@@ -50,6 +50,8 @@ public class TestConfiguration {", "removed": [] }, { "added": [ " /**", " * Return if it has to run under single legged xa transaction", " * @return singleLegXA", " */", " public boolean isSingleLegXA () {", " return singleLegXA;", " }", " " ], "header": "@@ -228,6 +230,14 @@ public class TestConfiguration {", "removed": [] }, { "added": [ " private final boolean singleLegXA;" ], "header": "@@ -240,6 +250,7 @@ public class TestConfiguration {", "removed": [] }, { "added": [ " private final static String KEY_VERBOSE = \"derby.tests.debug\"; ", " private final static String KEY_SINGLE_LEG_XA = \"junit.xa.single\";" ], "header": "@@ -260,7 +271,8 @@ public class TestConfiguration {", "removed": [ " private final static String KEY_VERBOSE = \"derby.tests.debug\";" ] } ] } ]
derby-DERBY-1214-e570c314
DERBY-1214: Commit Anurag's derby-1214_3.diff, changing the name of a test harness property. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@409420 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/functionTests/harness/RunList.java", "hunks": [ { "added": [ " static String derbyTestingXaSingle;// Run junit test cases with under " ], "header": "@@ -119,7 +119,7 @@ public class RunList", "removed": [ " static String junitxasingle;// Run junit test cases with under " ] }, { "added": [ " if (derbyTestingXaSingle != null)", " jvmProps.addElement (\"derbyTesting.xa.single=\" + derbyTestingXaSingle);" ], "header": "@@ -443,8 +443,8 @@ public class RunList", "removed": [ " if (junitxasingle != null)", " jvmProps.addElement (\"junit.xa.single=\" + junitxasingle);" ] } ] }, { "file": "java/testing/org/apache/derbyTesting/functionTests/util/TestConfiguration.java", "hunks": [ { "added": [ " private final static String KEY_SINGLE_LEG_XA = \"derbyTesting.xa.single\";" ], "header": "@@ -272,7 +272,7 @@ public class TestConfiguration {", "removed": [ " private final static String KEY_SINGLE_LEG_XA = \"junit.xa.single\";" ] } ] } ]
derby-DERBY-1219-25d6720b
DERBY-1219: jdbcapi/checkDataSource test hangs intermittently with client. This patch was contributed by Deepa Remesh (dremesh@gmail.com). This patch enables the checkDataSource and checkDataSource30 tests to run with the client framework by removing the code which shuts down the system in the middle of the test. This is the code which causes the intermittent hang. The hanging problem has been logged as a separate issue, DERBY-1326. The shutdown is a valuable part of the test because it verifies that the global transaction state is valid even after the database has been shut down, so once the hang problem has been resolved, this test should be modified again to re-enable the shutdown processing with the client framework. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@406776 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1224-765e2c45
DERBY-1224: Remove unused variables in DatabaseMetaData. Removing the following variables from org.apache.derby.client.am.DatabaseMetaData: lastGetColumnPrivilegesResultSet_, lastGetColumnsResultSet_, lastGetForeignKeysResultSet_, lastGetPrimaryKeysResultSet_, lastGetProcedureColumnsResultSet_, lastGetProceduresResultSet_, lastGetSpecialColumnsResultSet_, lastGetStatisticsResultSet_, lastGetTablePrivilegesResultSet_, lastGetTablesResultSet_, lastGetUDTsResultSet_, lastGetTypeInfoResultSet_, lastGetAttrResultSet_, lastGetSuperTypesResultSet_, lastGetSuperTablesResultSet_, since they seem to have no purpose. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@394861 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/DatabaseMetaData.java", "hunks": [ { "added": [], "header": "@@ -68,22 +68,6 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " private ResultSet lastGetColumnPrivilegesResultSet_ = null;", " private ResultSet lastGetColumnsResultSet_ = null;", " private ResultSet lastGetForeignKeysResultSet_ = null;", " private ResultSet lastGetPrimaryKeysResultSet_ = null;", " private ResultSet lastGetProcedureColumnsResultSet_ = null;", " private ResultSet lastGetProceduresResultSet_ = null;", " private ResultSet lastGetSpecialColumnsResultSet_ = null;", " private ResultSet lastGetStatisticsResultSet_ = null;", " private ResultSet lastGetTablePrivilegesResultSet_ = null;", " private ResultSet lastGetTablesResultSet_ = null;", " private ResultSet lastGetUDTsResultSet_ = null;", " private ResultSet lastGetTypeInfoResultSet_ = null;", " private ResultSet lastGetAttrResultSet_ = null;", " private ResultSet lastGetSuperTypesResultSet_ = null;", " private ResultSet lastGetSuperTablesResultSet_ = null;", "" ] }, { "added": [ " return executeCatalogQuery(cs);" ], "header": "@@ -1067,8 +1051,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " lastGetProceduresResultSet_ = executeCatalogQuery(cs);", " return lastGetProceduresResultSet_;" ] }, { "added": [ " return executeCatalogQuery(cs);" ], "header": "@@ -1114,8 +1097,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " lastGetProcedureColumnsResultSet_ = executeCatalogQuery(cs);", " return lastGetProcedureColumnsResultSet_;" ] }, { "added": [ " return executeCatalogQuery(cs);" ], "header": "@@ -1192,8 +1174,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " lastGetProceduresResultSet_ = executeCatalogQuery(cs);", " return lastGetProceduresResultSet_;" ] }, { "added": [ " return executeCatalogQuery(cs);" ], "header": "@@ -1271,8 +1252,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " lastGetTablesResultSet_ = executeCatalogQuery(cs);", " return lastGetTablesResultSet_;" ] }, { "added": [ " return executeCatalogQuery(cs);" ], "header": "@@ -1378,8 +1358,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " lastGetTablesResultSet_ = executeCatalogQuery(cs);", " return lastGetTablesResultSet_;" ] }, { "added": [ " return executeCatalogQuery(cs);" ], "header": "@@ -1424,8 +1403,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " lastGetColumnsResultSet_ = executeCatalogQuery(cs);", " return lastGetColumnsResultSet_;" ] }, { "added": [ " return executeCatalogQuery(cs);" ], "header": "@@ -1475,8 +1453,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " lastGetColumnPrivilegesResultSet_ = executeCatalogQuery(cs);", " return lastGetColumnPrivilegesResultSet_;" ] }, { "added": [ " return executeCatalogQuery(cs);" ], "header": "@@ -1517,8 +1494,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " lastGetTablePrivilegesResultSet_ = executeCatalogQuery(cs);", " return lastGetTablePrivilegesResultSet_;" ] }, { "added": [ " return executeCatalogQuery(cs);" ], "header": "@@ -1580,8 +1556,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " 
lastGetSpecialColumnsResultSet_ = executeCatalogQuery(cs);", " return lastGetSpecialColumnsResultSet_;" ] }, { "added": [ " return executeCatalogQuery(cs);" ], "header": "@@ -1624,8 +1599,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " lastGetSpecialColumnsResultSet_ = executeCatalogQuery(cs);", " return lastGetSpecialColumnsResultSet_;" ] }, { "added": [ " return executeCatalogQuery(cs);" ], "header": "@@ -1671,8 +1645,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " lastGetPrimaryKeysResultSet_ = executeCatalogQuery(cs);", " return lastGetPrimaryKeysResultSet_;" ] }, { "added": [ " return executeCatalogQuery(cs);" ], "header": "@@ -1724,8 +1697,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " lastGetForeignKeysResultSet_ = executeCatalogQuery(cs);", " return lastGetForeignKeysResultSet_;" ] }, { "added": [ " return executeCatalogQuery(cs);" ], "header": "@@ -1777,8 +1749,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " lastGetForeignKeysResultSet_ = executeCatalogQuery(cs);", " return lastGetForeignKeysResultSet_;" ] }, { "added": [ " return executeCatalogQuery(cs);" ], "header": "@@ -1845,8 +1816,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " lastGetForeignKeysResultSet_ = executeCatalogQuery(cs);", " return lastGetForeignKeysResultSet_;" ] }, { "added": [ " return executeCatalogQuery(cs);" ], "header": "@@ -1879,8 +1849,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " lastGetTypeInfoResultSet_ = executeCatalogQuery(cs);", " return lastGetTypeInfoResultSet_;" ] }, { "added": [ " return executeCatalogQuery(cs);" ], "header": "@@ -1944,8 +1913,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " lastGetStatisticsResultSet_ = executeCatalogQuery(cs);", " return lastGetStatisticsResultSet_;" ] }, { "added": [ " return executeCatalogQuery(cs);" ], "header": "@@ -1993,8 +1961,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " lastGetUDTsResultSet_ = executeCatalogQuery(cs);", " return lastGetUDTsResultSet_;" ] }, { "added": [ " return ps.executeQueryX();" ], "header": "@@ -2082,8 +2049,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " lastGetSuperTypesResultSet_ = ps.executeQueryX();", " return lastGetSuperTypesResultSet_;" ] }, { "added": [ " return ps.executeQueryX();" ], "header": "@@ -2112,8 +2078,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " lastGetSuperTablesResultSet_ = ps.executeQueryX();", " return lastGetSuperTablesResultSet_;" ] }, { "added": [ " return ps.executeQueryX();" ], "header": "@@ -2161,8 +2126,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [ " lastGetAttrResultSet_ = ps.executeQueryX();", " return lastGetAttrResultSet_;" ] } ] } ]
derby-DERBY-1229-f64222cd
DERBY-1229: sysinfo and sysinfo_withProperties tests fail with SecurityException This change modifies the handling of security exceptions in the Sysinfo tool. If a security exception occurs while processing an element in the classpath, sysinfo will now format that security exception's message into the output, and then will continue on to process the other elements in the classpath. This ensures that sysinfo provides more detailed and more complete information about the classpath information in the environment. Also, the sed properties for the sysinfo tests are now updated to mask out the getProtectionDomain security exception, since that is an expected exception in these tests (in fact, there are many expected security exceptions in these tests). git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@400366 13f79535-47bb-0310-9956-ffa450edef68
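The behavior described above can be sketched as a loop over the classpath entries in which a SecurityException on one entry is converted into an output line rather than terminating the report. This is a simplified illustration; checkForInfo and the output format are stand-ins for the real org.apache.derby.impl.tools.sysinfo code:

// Simplified illustration of continuing past SecurityExceptions per classpath element.
import java.io.PrintWriter;

class ClasspathReportSketch {
    static void reportClasspath(String[] cp, PrintWriter out) {
        for (int i = 0; i < cp.length; i++) {
            String info;
            try {
                info = checkForInfo(cp[i]);
            } catch (SecurityException se) {
                // Format the failure into the report and keep going with the
                // remaining classpath elements instead of aborting.
                info = "access denied (" + se.getMessage() + ")";
            }
            out.println(cp[i] + ": " + info);
        }
    }

    // Hypothetical probe; the real one may read jars, protection domains, etc.,
    // any of which can raise SecurityException under a security manager.
    private static String checkForInfo(String classpathElement) {
        return "ok";
    }
}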
[ { "file": "java/tools/org/apache/derby/impl/tools/sysinfo/Main.java", "hunks": [ { "added": [ "\t\t\t\t\tZipInfoProperties zip = null;", "\t\t\t\t\ttry {", "\t\t\t\t\t\tzip = checkForInfo(cp[i]);", "\t\t\t\t\t}", "\t\t\t\t\tcatch (SecurityException se)", "\t\t\t\t\t{", "\t\t\t\t\t\tzip = new ZipInfoProperties(null);", "\t\t\t\t\t\tzip.setLocation(", "\t\t\t\t\t\t\tMain.getTextMessage (\"SIF03.C\", se.getMessage()));", "\t\t\t\t\t}" ], "header": "@@ -753,7 +753,16 @@ public static void getMainInfo (java.io.PrintWriter aw, boolean pause) {", "removed": [ "\t\t\t\t\tZipInfoProperties zip = checkForInfo(cp[i]);" ] } ] } ]
derby-DERBY-1231-dd611b80
DERBY-1231 LIKE does not match empty strings when used with a prepared statement. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@482983 13f79535-47bb-0310-9956-ffa450edef68
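A usage-level illustration of the fix, assuming a hypothetical table T with a VARCHAR column C: when the LIKE pattern is supplied through a parameter marker and set to the empty string, the query should now match rows whose column value is the empty string (before the fix it matched nothing):

// Hypothetical example; table and column names are made up.
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

class LikeEmptyStringExample {
    static int countEmptyStrings(Connection conn) throws SQLException {
        PreparedStatement ps =
            conn.prepareStatement("SELECT COUNT(*) FROM T WHERE C LIKE ?");
        try {
            ps.setString(1, "");             // empty pattern: matches only the empty string
            ResultSet rs = ps.executeQuery();
            rs.next();
            return rs.getInt(1);             // > 0 after the fix, if T holds '' values
        } finally {
            ps.close();
        }
    }
}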
[ { "file": "java/engine/org/apache/derby/iapi/types/Like.java", "hunks": [ { "added": [ "\t * clauses. It can if the pattern is non-null and if the length == 0 or" ], "header": "@@ -668,7 +668,7 @@ public class Like {", "removed": [ "\t * clauses. It can if the pattern is non-null, of length > 0 and" ] }, { "added": [ "\t\tif (pattern == null)", " if (pattern.length() == 0) {", " return true;", " }", "" ], "header": "@@ -678,11 +678,15 @@ public class Like {", "removed": [ "\t\tif (pattern == null || (pattern.length() == 0))" ] }, { "added": [], "header": "@@ -871,7 +875,6 @@ public class Like {", "removed": [ "\t * (NOTE: This may be null if the pattern is an empty string.)" ] }, { "added": [], "header": "@@ -888,12 +891,6 @@ public class Like {", "removed": [ "\t\tif (pattern.length() == 0)", "\t\t{", "\t\t\t// pattern is \"\"", "\t\t\treturn null;", "\t\t}", "" ] }, { "added": [ "\t\t *\t\"\"\t\t\t\tSUPER_STRING (match against super string)" ], "header": "@@ -911,7 +908,7 @@ public class Like {", "removed": [ "\t\t *\t\"\"\t\t\t\tnull" ] }, { "added": [ "\t\t// Pattern is empty or starts with wildcard." ], "header": "@@ -934,7 +931,7 @@ public class Like {", "removed": [ "\t\t// Pattern starts with wildcard." ] } ] } ]
derby-DERBY-1234-050b6163
DERBY-1234 (partial): Verify that we raise SQLException when calling methods on closed java.sql objects. Added checkStatus() to the EmbedXXXStatement methods that don't check whether the statement is closed. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@397186 13f79535-47bb-0310-9956-ffa450edef68
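The pattern being added is a guard call at the top of each affected method. A minimal sketch with hypothetical names; the real Derby classes inherit checkStatus() and raise the exception through Derby's own SQLState machinery:

// Illustrative guard pattern, not the actual EmbedPreparedStatement code.
import java.sql.SQLException;

class GuardedStatementSketch {
    private boolean closed;

    /** Raises an exception if this statement has been closed. */
    final void checkStatus() throws SQLException {
        if (closed) {
            throw new SQLException("Statement already closed");
        }
    }

    public void clearParameters() throws SQLException {
        checkStatus();   // fail fast instead of silently operating on a closed statement
        // ... normal parameter clearing would go here ...
    }

    public void close() {
        closed = true;
    }
}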
[ { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedCallableStatement20.java", "hunks": [ { "added": [ "\t\tcheckStatus();" ], "header": "@@ -132,6 +132,7 @@ public class EmbedCallableStatement20", "removed": [] }, { "added": [ "\t\tcheckStatus();" ], "header": "@@ -968,6 +969,7 @@ public class EmbedCallableStatement20", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedCallableStatement40.java", "hunks": [ { "added": [ " checkStatus();" ], "header": "@@ -208,6 +208,7 @@ public class EmbedCallableStatement40 extends EmbedCallableStatement30 {", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedPreparedStatement.java", "hunks": [ { "added": [ "\t\tcheckStatus();" ], "header": "@@ -632,6 +632,7 @@ public abstract class EmbedPreparedStatement", "removed": [] }, { "added": [ " private void setCharacterStreamInternal(int parameterIndex," ], "header": "@@ -647,15 +648,13 @@ public abstract class EmbedPreparedStatement", "removed": [ " protected void setCharacterStreamInternal(int parameterIndex,", " ", " checkStatus();" ] }, { "added": [ "\t\tcheckStatus();", "" ], "header": "@@ -749,6 +748,8 @@ public abstract class EmbedPreparedStatement", "removed": [] }, { "added": [ " private void setBinaryStreamInternal(int parameterIndex, InputStream x," ], "header": "@@ -763,7 +764,7 @@ public abstract class EmbedPreparedStatement", "removed": [ " protected void setBinaryStreamInternal(int parameterIndex, InputStream x," ] }, { "added": [], "header": "@@ -771,7 +772,6 @@ public abstract class EmbedPreparedStatement", "removed": [ "\t\tcheckStatus();" ] }, { "added": [ " checkStatus();" ], "header": "@@ -1223,6 +1223,7 @@ public abstract class EmbedPreparedStatement", "removed": [] }, { "added": [ " checkStatus();" ], "header": "@@ -1256,6 +1257,7 @@ public abstract class EmbedPreparedStatement", "removed": [] }, { "added": [ " checkStatus();" ], "header": "@@ -1539,6 +1541,7 @@ public abstract class EmbedPreparedStatement", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedPreparedStatement40.java", "hunks": [ { "added": [ " checkStatus();" ], "header": "@@ -96,6 +96,7 @@ public class EmbedPreparedStatement40 extends EmbedPreparedStatement30 {", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedStatement.java", "hunks": [ { "added": [ " checkStatus();" ], "header": "@@ -401,6 +401,7 @@ public class EmbedStatement extends ConnectionChild", "removed": [] }, { "added": [ "\t\tif (sql == null) {", "\t\t\tthrow newSQLException(SQLState.NULL_SQL_TEXT);", "\t\t}" ], "header": "@@ -541,14 +542,12 @@ public class EmbedStatement extends ConnectionChild", "removed": [ "", "\t// if sql is null, raise an error", "\tif (sql == null)", " \t\tthrow newSQLException(SQLState.NULL_SQL_TEXT);", "" ] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedStatement40.java", "hunks": [ { "added": [ " checkStatus();" ], "header": "@@ -55,6 +55,7 @@ public class EmbedStatement40 extends EmbedStatement {", "removed": [] } ] } ]
derby-DERBY-1234-1861dba6
DERBY-1234 (partial) Make the methods in EmbedConnection raise an exception when the connection is closed. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@399604 13f79535-47bb-0310-9956-ffa450edef68
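The diff below introduces a single checkIfClosed() helper and calls it from methods that previously either asserted only in debug builds or did not check at all. A simplified sketch of that shape; noCurrentConnection() stands in for org.apache.derby.impl.jdbc.Util.noCurrentConnection():

// Simplified sketch of the connection-level closed check.
import java.sql.SQLException;

class ClosedCheckingConnectionSketch {
    private boolean closed;

    /** Raises an exception if the connection is closed. */
    protected final void checkIfClosed() throws SQLException {
        if (closed) {
            throw noCurrentConnection();
        }
    }

    public String nativeSQL(String sql) throws SQLException {
        checkIfClosed();   // previously some methods only asserted in debug builds
        return sql;        // this sketch simply echoes the SQL text
    }

    private static SQLException noCurrentConnection() {
        // Stand-in for Derby's "no current connection" exception factory.
        return new SQLException("No current connection", "08003");
    }
}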
[ { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedConnection.java", "hunks": [ { "added": [ "\t\tcheckIfClosed();" ], "header": "@@ -517,8 +517,7 @@ public class EmbedConnection implements EngineConnection", "removed": [ "\t\tif (isClosed())", "\t\t\tthrow Util.noCurrentConnection();" ] }, { "added": [ "\t\tcheckIfClosed();" ], "header": "@@ -795,8 +794,7 @@ public class EmbedConnection implements EngineConnection", "removed": [ "\t\tif (SanityManager.DEBUG)", "\t\t\tSanityManager.ASSERT(!isClosed(), \"connection is closed\");" ] }, { "added": [ " public String nativeSQL(String sql) throws SQLException {", " checkIfClosed();" ], "header": "@@ -824,7 +822,8 @@ public class EmbedConnection implements EngineConnection", "removed": [ " public String nativeSQL(String sql) {" ] }, { "added": [ "\t\tcheckIfClosed();" ], "header": "@@ -851,6 +850,7 @@ public class EmbedConnection implements EngineConnection", "removed": [] }, { "added": [ " public boolean getAutoCommit() throws SQLException {", " checkIfClosed();" ], "header": "@@ -870,7 +870,8 @@ public class EmbedConnection implements EngineConnection", "removed": [ " public boolean getAutoCommit() {" ] }, { "added": [ " checkIfClosed();" ], "header": "@@ -1043,8 +1044,7 @@ public class EmbedConnection implements EngineConnection", "removed": [ "\t\tif (isClosed())", "\t\t\tthrow Util.noCurrentConnection();" ] }, { "added": [ "\tpublic final int getHoldability() throws SQLException {", "\t\tcheckIfClosed();" ], "header": "@@ -1068,8 +1068,8 @@ public class EmbedConnection implements EngineConnection", "removed": [ "\tpublic final int getHoldability()", "\t{" ] }, { "added": [ "\tpublic final void setHoldability(int holdability) throws SQLException {", "\t\tcheckIfClosed();" ], "header": "@@ -1083,8 +1083,8 @@ public class EmbedConnection implements EngineConnection", "removed": [ "\tpublic final void setHoldability(int holdability)", "\t{" ] }, { "added": [ "\t\tcheckIfClosed();" ], "header": "@@ -1122,6 +1122,7 @@ public class EmbedConnection implements EngineConnection", "removed": [] }, { "added": [ " checkIfClosed();" ], "header": "@@ -1133,6 +1134,7 @@ public class EmbedConnection implements EngineConnection", "removed": [] }, { "added": [ "\t\tcheckIfClosed();" ], "header": "@@ -1144,6 +1146,7 @@ public class EmbedConnection implements EngineConnection", "removed": [] }, { "added": [ " checkIfClosed();" ], "header": "@@ -1211,7 +1214,7 @@ public class EmbedConnection implements EngineConnection", "removed": [ "" ] }, { "added": [ "\tpublic final synchronized SQLWarning getWarnings() throws SQLException {", "\t\tcheckIfClosed();" ], "header": "@@ -1227,7 +1230,8 @@ public class EmbedConnection implements EngineConnection", "removed": [ "\tpublic final synchronized SQLWarning getWarnings() {" ] }, { "added": [ " public final synchronized void clearWarnings() throws SQLException {", " checkIfClosed();" ], "header": "@@ -1238,7 +1242,8 @@ public class EmbedConnection implements EngineConnection", "removed": [ " public final synchronized void clearWarnings() {" ] }, { "added": [ " public java.util.Map getTypeMap() throws SQLException {", " checkIfClosed();" ], "header": "@@ -1255,7 +1260,8 @@ public class EmbedConnection implements EngineConnection", "removed": [ " public java.util.Map getTypeMap() {" ] }, { "added": [ " checkIfClosed();" ], "header": "@@ -1268,7 +1274,7 @@ public class EmbedConnection implements EngineConnection", "removed": [ "" ] }, { "added": [ " /**", " * Raises an exception if the connection is closed.", " *", " * 
@exception SQLException if the connection is closed", " */", " protected final void checkIfClosed() throws SQLException {", " if (isClosed()) {", " throw Util.noCurrentConnection();", " }", " }", "" ], "header": "@@ -1319,6 +1325,17 @@ public class EmbedConnection implements EngineConnection", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedConnection40.java", "hunks": [ { "added": [ "import java.util.HashMap;", "import java.util.Map;" ], "header": "@@ -30,6 +30,8 @@ import java.sql.QueryObjectFactory;", "removed": [] }, { "added": [ "", " /**", " * Returns the type map for this connection.", " *", " * @return type map for this connection", " * @exception SQLException if a database access error occurs", " */", " public final Map<String, Class<?>> getTypeMap() throws SQLException {", " // This method is already implemented with a non-generic", " // signature in EmbedConnection. We could just use that method", " // directly, but then we get a compiler warning (unchecked", " // cast/conversion). Copy the map to avoid the compiler", " // warning.", " Map typeMap = super.getTypeMap();", " if (typeMap == null) return null;", " Map<String, Class<?>> genericTypeMap = new HashMap<String, Class<?>>();", " for (Object key : typeMap.keySet()) {", " genericTypeMap.put((String) key, (Class) typeMap.get(key));", " }", " return genericTypeMap;", " }" ], "header": "@@ -128,6 +130,27 @@ public class EmbedConnection40 extends EmbedConnection30 {", "removed": [] }, { "added": [ " checkIfClosed();" ], "header": "@@ -137,6 +160,7 @@ public class EmbedConnection40 extends EmbedConnection30 {", "removed": [] }, { "added": [ " checkIfClosed();" ], "header": "@@ -152,6 +176,7 @@ public class EmbedConnection40 extends EmbedConnection30 {", "removed": [] } ] } ]
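The hunks above collapse the scattered isClosed() tests in EmbedConnection into a single guard, checkIfClosed(), that every public method calls first. A minimal, self-contained sketch of that guard pattern, assuming only the standard JDBC SQLException (the class name and the SQL state used here are illustrative, not Derby's actual code):

    import java.sql.SQLException;

    // Illustrative stand-in for a JDBC connection wrapper that guards
    // every public method with a single closed-state check.
    class GuardedConnection {
        private boolean closed;

        // Mirrors the checkIfClosed() helper added in the diff: raise a
        // "no current connection" style error instead of repeating the
        // isClosed() test in every method.
        private void checkIfClosed() throws SQLException {
            if (closed) {
                throw new SQLException("No current connection", "08003");
            }
        }

        public String nativeSQL(String sql) throws SQLException {
            checkIfClosed();   // guard first, then do the real work
            return sql;        // Derby returns the SQL text unchanged here
        }

        public void close() {
            closed = true;
        }
    }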
derby-DERBY-1234-26583066
DERBY-1234 (partial) Verify that we raise SQLException when calling methods on closed java.sql objects. New patch (part5) which makes all the BrokeredXXX classes check whether the object is closed. What the patch does is: 1) Implement checkIfClosed() and call it where needed. 2) Implement BrokeredStatement.isClosed(), which was needed by checkIfClosed(). 3) Remove isWrapperFor() from BrokeredStatement40, BrokeredPreparedStatement40 and BrokeredCallableStatement40 and replace them with one shared method (which does the right thing when the object is closed) in BrokeredStatement. With this patch, jdbc4/ClosedObjectTest.junit runs cleanly on embedded. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@400287 13f79535-47bb-0310-9956-ffa450edef68
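A hedged sketch of the delegation this message describes, with the diff following below: the brokered (wrapper) statement answers isClosed() by asking the statement it wraps, so one checkIfClosed() helper can serve every Brokered*40 subclass. Class names here are illustrative stand-ins, not Derby's real classes:

    import java.sql.SQLException;
    import java.sql.Statement;

    // Wrapper that forwards the closed check to the underlying statement,
    // so the closed-object check lives in one place instead of each subclass.
    class BrokeredStatementSketch {
        private final Statement underlying;

        BrokeredStatementSketch(Statement underlying) {
            this.underlying = underlying;
        }

        public boolean isClosed() throws SQLException {
            return underlying.isClosed();   // delegate, as in the patch
        }

        final void checkIfClosed() throws SQLException {
            if (isClosed()) {
                throw new SQLException("Statement is closed");
            }
        }
    }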
[ { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredCallableStatement40.java", "hunks": [ { "added": [ " * Checks if the statement is closed.", " *", " * @return <code>true</code> if the statement is closed,", " * <code>false</code> otherwise", " * @exception SQLException if an error occurs", " public final boolean isClosed() throws SQLException {", " return getCallableStatement().isClosed();" ], "header": "@@ -176,18 +176,14 @@ public class BrokeredCallableStatement40 extends BrokeredCallableStatement30{", "removed": [ " * Returns false unless <code>interfaces</code> is implemented ", " * ", " * @param interfaces a Class defining an interface.", " * @return true if this implements the interface or ", " * directly or indirectly wraps an object ", " * that does.", " * @throws java.sql.SQLException if an error occurs while determining ", " * whether this is a wrapper for an object ", " * with the given interface.", " public boolean isWrapperFor(Class<?> interfaces) throws SQLException {", " return interfaces.isInstance(this);" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredConnection40.java", "hunks": [ { "added": [ " /**", " * Returns the type map for this connection.", " *", " * @return type map for this connection", " * @exception SQLException if a database access error occurs", " */", " try {", " return getRealConnection().getTypeMap();", " } catch (SQLException se) {", " notifyException(se);", " throw se;", " }" ], "header": "@@ -118,8 +118,19 @@ public class BrokeredConnection40 extends BrokeredConnection30 {", "removed": [ " throw Util.notImplemented();" ] }, { "added": [ " checkIfClosed();" ], "header": "@@ -136,6 +147,7 @@ public class BrokeredConnection40 extends BrokeredConnection30 {", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredPreparedStatement40.java", "hunks": [ { "added": [ " * Checks if the statement is closed.", " *", " * @return <code>true</code> if the statement is closed,", " * <code>false</code> otherwise", " * @exception SQLException if an error occurs", " public final boolean isClosed() throws SQLException {", " return getPreparedStatement().isClosed();" ], "header": "@@ -70,18 +70,14 @@ public class BrokeredPreparedStatement40 extends BrokeredPreparedStatement30{", "removed": [ " * Returns false unless <code>interfaces</code> is implemented ", " * ", " * @param interfaces a Class defining an interface.", " * @return true if this implements the interface or ", " * directly or indirectly wraps an object ", " * that does.", " * @throws java.sql.SQLException if an error occurs while determining ", " * whether this is a wrapper for an object ", " * with the given interface.", " public boolean isWrapperFor(Class<?> interfaces) throws SQLException {", " return interfaces.isInstance(this);" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredStatement40.java", "hunks": [ { "added": [ " * Checks if the statement is closed.", " *", " * @return <code>true</code> if the statement is closed,", " * <code>false</code> otherwise", " * @exception SQLException if an error occurs", " public final boolean isClosed() throws SQLException {", " return getStatement().isClosed();" ], "header": "@@ -41,18 +41,14 @@ public class BrokeredStatement40 extends BrokeredStatement {", "removed": [ " * Returns false unless <code>interfaces</code> is implemented ", " * ", " * @param interfaces a Class defining an interface.", " * @return true if this implements the interface or ", " * directly or indirectly wraps an object 
", " * that does.", " * @throws java.sql.SQLException if an error occurs while determining ", " * whether this is a wrapper for an object ", " * with the given interface.", " public boolean isWrapperFor(Class<?> interfaces) throws SQLException {", " return interfaces.isInstance(this);" ] }, { "added": [ " checkIfClosed();" ], "header": "@@ -65,6 +61,7 @@ public class BrokeredStatement40 extends BrokeredStatement {", "removed": [] } ] } ]
derby-DERBY-1234-320777fd
DERBY-1234: Verify that we raise SQLException when calling methods on closed java.sql objects Final patch for DERBY-1234. The patch makes the methods in the client driver check whether the object is closed. With this patch, jdbcapi/StmtCloseFunTest.java has the same behaviour with the client driver as with the embedded driver so the DerbyNetClient canon could be removed. Also, jdbc4/ClosedObjectTest.junit runs cleanly both on embedded and on the client, and it has been added to the jdbc40 suite. In addition to adding checkForClosedXXX() calls, this patch a) implements Connection.getTypeMap() with the correct generic signature in NetConnection40 and LogicalConnection40 to avoid compiler warnings b) implements isWrapperFor() (with check for closed object) in Statement, so that it could be removed from Statement40, PreparedStatement40 and CallableStatement40 git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@406280 13f79535-47bb-0310-9956-ffa450edef68
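The recurring shape in the client-driver hunks below is a closed-object check that translates the driver's internal exception into java.sql.SQLException before rethrowing, and the shared isWrapperFor() is built on exactly that check. A simplified, self-contained sketch under that assumption (the internal exception type is a stand-in for the client's SqlException, not the real class):

    import java.sql.SQLException;

    class ClosedCheckSketch {
        // Stand-in for the client driver's internal exception type.
        static class InternalSqlException extends Exception {
            InternalSqlException(String msg) { super(msg); }
            SQLException getSQLException() {
                return new SQLException(getMessage());
            }
        }

        private boolean closed;

        private void checkForClosedStatement() throws InternalSqlException {
            if (closed) {
                throw new InternalSqlException("Statement is closed");
            }
        }

        // Public JDBC-facing method: check first, translate the internal
        // exception to SQLException, then answer the wrapper question.
        public boolean isWrapperFor(Class<?> iface) throws SQLException {
            try {
                checkForClosedStatement();
            } catch (InternalSqlException se) {
                throw se.getSQLException();
            }
            return iface.isInstance(this);
        }
    }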
[ { "file": "java/client/org/apache/derby/client/am/CallableStatement.java", "hunks": [ { "added": [ " java.sql.Date result = getDate(parameterIndex);" ], "header": "@@ -653,11 +653,11 @@ public class CallableStatement extends PreparedStatement", "removed": [ " java.sql.Date result = getDate(parameterIndex);" ] }, { "added": [ " java.sql.Time result = getTime(parameterIndex);" ], "header": "@@ -711,11 +711,11 @@ public class CallableStatement extends PreparedStatement", "removed": [ " java.sql.Time result = getTime(parameterIndex);" ] }, { "added": [ " java.sql.Timestamp result = getTimestamp(parameterIndex);" ], "header": "@@ -768,11 +768,11 @@ public class CallableStatement extends PreparedStatement", "removed": [ " java.sql.Timestamp result = getTimestamp(parameterIndex);" ] } ] }, { "file": "java/client/org/apache/derby/client/am/CallableStatement40.java", "hunks": [ { "added": [], "header": "@@ -192,21 +192,6 @@ public class CallableStatement40 extends org.apache.derby.client.am.CallableStat", "removed": [ " /**", " * Returns false unless <code>interfaces</code> is implemented ", " * ", " * @param interfaces a Class defining an interface.", " * @return true if this implements the interface or ", " * directly or indirectly wraps an object ", " * that does.", " * @throws java.sql.SQLException if an error occurs while determining ", " * whether this is a wrapper for an object ", " * with the given interface.", " */", " public boolean isWrapperFor(Class<?> interfaces) throws SQLException {", " return interfaces.isInstance(this);", " }", " " ] } ] }, { "file": "java/client/org/apache/derby/client/am/Connection.java", "hunks": [ { "added": [ " checkForClosedConnection();" ], "header": "@@ -535,6 +535,7 @@ public abstract class Connection implements java.sql.Connection,", "removed": [] }, { "added": [], "header": "@@ -542,7 +543,6 @@ public abstract class Connection implements java.sql.Connection,", "removed": [ " checkForClosedConnection();" ] }, { "added": [ " checkForClosedConnection();" ], "header": "@@ -642,9 +642,9 @@ public abstract class Connection implements java.sql.Connection,", "removed": [ " checkForClosedConnection();" ] }, { "added": [ " public java.sql.SQLWarning getWarnings() throws SQLException {", " try {", " checkForClosedConnection();", " } catch (SqlException se) {", " throw se.getSQLException();", " }" ], "header": "@@ -951,10 +951,15 @@ public abstract class Connection implements java.sql.Connection,", "removed": [ " public java.sql.SQLWarning getWarnings() {" ] } ] }, { "file": "java/client/org/apache/derby/client/am/LogicalConnection40.java", "hunks": [ { "added": [ "import java.util.Map;" ], "header": "@@ -34,6 +34,7 @@ import java.util.Properties;", "removed": [] }, { "added": [ " /**", " * Returns the type map for this connection.", " *", " * @return type map for this connection", " * @exception SQLException if a database access error occurs", " */", " public Map<String, Class<?>> getTypeMap() throws SQLException {", " checkForNullPhysicalConnection();", " return ((java.sql.Connection) physicalConnection_).getTypeMap();", " }" ], "header": "@@ -96,6 +97,16 @@ public class LogicalConnection40", "removed": [] } ] }, { "file": "java/client/org/apache/derby/client/am/PreparedStatement.java", "hunks": [ { "added": [ " checkForClosedStatement();" ], "header": "@@ -611,6 +611,7 @@ public class PreparedStatement extends Statement", "removed": [] }, { "added": [ " checkForClosedStatement();" ], "header": "@@ -635,6 +636,7 @@ public class PreparedStatement extends 
Statement", "removed": [] }, { "added": [ " checkForClosedStatement();" ], "header": "@@ -691,6 +693,7 @@ public class PreparedStatement extends Statement", "removed": [] }, { "added": [ " checkForClosedStatement();" ], "header": "@@ -750,6 +753,7 @@ public class PreparedStatement extends Statement", "removed": [] } ] }, { "file": "java/client/org/apache/derby/client/am/PreparedStatement40.java", "hunks": [ { "added": [], "header": "@@ -123,20 +123,6 @@ public class PreparedStatement40 extends org.apache.derby.client.am.PreparedSt", "removed": [ " /**", " * Returns false unless <code>interfaces</code> is implemented ", " * ", " * @param interfaces a Class defining an interface.", " * @return true if this implements the interface or ", " * directly or indirectly wraps an object ", " * that does.", " * @throws java.sql.SQLException if an error occurs while determining ", " * whether this is a wrapper for an object ", " * with the given interface.", " */", " public boolean isWrapperFor(Class<?> interfaces) throws SQLException {", " return interfaces.isInstance(this);", " }" ] } ] }, { "file": "java/client/org/apache/derby/client/am/ResultSet.java", "hunks": [ { "added": [ " java.sql.Date date = getDate(column);" ], "header": "@@ -881,11 +881,11 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " java.sql.Date date = getDate(column);" ] }, { "added": [ " java.sql.Time time = getTime(column);" ], "header": "@@ -946,11 +946,11 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " java.sql.Time time = getTime(column);" ] } ] }, { "file": "java/client/org/apache/derby/client/am/Statement.java", "hunks": [ { "added": [ " /**", " * Returns false unless <code>iface</code> is implemented ", " * ", " * @param iface a Class defining an interface.", " * @return true if this implements the interface or ", " * directly or indirectly wraps an object ", " * that does.", " * @throws java.sql.SQLException if an error occurs while determining ", " * whether this is a wrapper for an object ", " * with the given interface.", " */", " public boolean isWrapperFor(Class iface) throws SQLException {", " try {", " checkForClosedStatement();", " } catch (SqlException se) {", " throw se.getSQLException();", " }", " return iface.isInstance(this);", " }", "" ], "header": "@@ -480,6 +480,26 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [] }, { "added": [ " try {", " checkForClosedStatement();", " } catch (SqlException se) {", " throw se.getSQLException();", " }" ], "header": "@@ -751,6 +771,11 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [] } ] }, { "file": "java/client/org/apache/derby/client/am/Statement40.java", "hunks": [ { "added": [], "header": "@@ -59,21 +59,6 @@ public class Statement40 extends Statement{", "removed": [ " /**", " * Returns false unless <code>interfaces</code> is implemented ", " * ", " * @param interfaces a Class defining an interface.", " * @return true if this implements the interface or ", " * directly or indirectly wraps an object ", " * that does.", " * @throws java.sql.SQLException if an error occurs while determining ", " * whether this is a wrapper for an object ", " * with the given interface.", " */", " public boolean isWrapperFor(Class<?> interfaces) throws SQLException {", " return interfaces.isInstance(this);", " }", " " ] } ] }, { "file": "java/client/org/apache/derby/client/net/NetConnection40.java", "hunks": [ { "added": [ 
"import java.util.HashMap;", "import java.util.Map;" ], "header": "@@ -34,9 +34,10 @@ import java.sql.ResultSet;", "removed": [ "import org.apache.derby.jdbc.InternalDriver;" ] }, { "added": [ " try {", " checkForClosedConnection();", " } catch (SqlException se) {", " throw se.getSQLException();", " }" ], "header": "@@ -140,6 +141,11 @@ public class NetConnection40 extends org.apache.derby.client.net.NetConnection", "removed": [] }, { "added": [ " try {", " checkForClosedConnection();", " } catch (SqlException se) {", " throw se.getSQLException();", " }" ], "header": "@@ -154,6 +160,11 @@ public class NetConnection40 extends org.apache.derby.client.net.NetConnection", "removed": [] }, { "added": [ " /**", " * Returns the type map for this connection.", " *", " * @return type map for this connection", " * @exception SQLException if a database access error occurs", " */", " public final Map<String, Class<?>> getTypeMap() throws SQLException {", " // This method is already implemented with a non-generic", " // signature in am/Connection. We could just use that method", " // directly, but then we get a compiler warning (unchecked", " // cast/conversion). Copy the map to avoid the compiler", " // warning.", " Map typeMap = super.getTypeMap();", " if (typeMap == null) return null;", " Map<String, Class<?>> genericTypeMap = new HashMap<String, Class<?>>();", " for (Object key : typeMap.keySet()) {", " genericTypeMap.put((String) key, (Class) typeMap.get(key));", " }", " return genericTypeMap;", " }", "" ], "header": "@@ -276,6 +287,27 @@ public class NetConnection40 extends org.apache.derby.client.net.NetConnection", "removed": [] }, { "added": [ " try {", " checkForClosedConnection();", " } catch (SqlException se) {", " throw se.getSQLException();", " }" ], "header": "@@ -284,6 +316,11 @@ public class NetConnection40 extends org.apache.derby.client.net.NetConnection", "removed": [] }, { "added": [ " try {", " checkForClosedConnection();", " } catch (SqlException se) {", " throw se.getSQLException();", " }" ], "header": "@@ -299,6 +336,11 @@ public class NetConnection40 extends org.apache.derby.client.net.NetConnection", "removed": [] } ] }, { "file": "java/client/org/apache/derby/client/net/NetResultSet40.java", "hunks": [ { "added": [ " try {", " checkForClosedResultSet();", " } catch (SqlException se) {", " throw se.getSQLException();", " }" ], "header": "@@ -158,6 +158,11 @@ public class NetResultSet40 extends NetResultSet{", "removed": [] } ] } ]
derby-DERBY-1234-3631eb02
DERBY-1234: Make all methods in EmbedResultSet, EmbedResultSet20 and EmbedResultSet40 invoke checkIfClosed(). git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@398837 13f79535-47bb-0310-9956-ffa450edef68
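Each getter and updater in the diff below now calls checkIfClosed with the name of the operation, so the error raised on a closed result set can say which call was attempted. A rough sketch of that shape (simplified; the class and message text are illustrative, not Derby's implementation):

    import java.sql.SQLException;

    class ResultSetGuardSketch {
        private boolean closed;

        // The operation name is only used to build a clearer error message.
        final void checkIfClosed(String operation) throws SQLException {
            if (closed) {
                throw new SQLException(
                    "ResultSet not open; operation '" + operation + "' not permitted");
            }
        }

        public String getString(int columnIndex) throws SQLException {
            checkIfClosed("getString");
            // ... column access would follow here in a real result set ...
            return null;
        }
    }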
[ { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedResultSet.java", "hunks": [ { "added": [ " checkIfClosed(\"getString\");" ], "header": "@@ -636,6 +636,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"getBoolean\");" ], "header": "@@ -670,7 +671,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "" ] }, { "added": [ "\t\tcheckIfClosed(\"getByte\");" ], "header": "@@ -694,7 +695,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "" ] }, { "added": [ "\t\tcheckIfClosed(\"getShort\");" ], "header": "@@ -717,7 +718,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "" ] }, { "added": [ "\t\tcheckIfClosed(\"getInt\");" ], "header": "@@ -741,7 +742,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "" ] }, { "added": [ "\t\tcheckIfClosed(\"getLong\");" ], "header": "@@ -764,7 +765,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "" ] }, { "added": [ "\t\tcheckIfClosed(\"getFloat\");" ], "header": "@@ -787,7 +788,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "" ] }, { "added": [ "\t\tcheckIfClosed(\"getDouble\");" ], "header": "@@ -810,7 +811,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "" ] }, { "added": [ "\t\tcheckIfClosed(\"getBytes\");" ], "header": "@@ -834,7 +835,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "" ] }, { "added": [ "\t\tcheckIfClosed(\"getDate\");" ], "header": "@@ -912,6 +913,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " checkIfClosed(\"getDate\");" ], "header": "@@ -945,6 +947,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"getTime\");" ], "header": "@@ -964,6 +967,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " checkIfClosed(\"getTime\");" ], "header": "@@ -996,6 +1000,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " checkIfClosed(\"getTimestamp\");" ], "header": "@@ -1015,6 +1020,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"getTimestamp\");" ], "header": "@@ -1034,6 +1040,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"getCharacterStream\");" ], "header": "@@ -1060,6 +1067,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"getAsciiStream\");" ], "header": "@@ -1146,7 +1154,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "" ] }, { "added": [ "\t\tcheckIfClosed(\"getBinaryStream\");" ], "header": "@@ -1185,7 +1193,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "" ] }, { "added": [ " checkIfClosed(\"getString\");" ], "header": "@@ -1261,6 +1269,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " checkIfClosed(\"getBoolean\");" ], "header": "@@ -1272,6 +1281,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " checkIfClosed(\"getByte\");" ], "header": "@@ -1283,6 +1293,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { 
"added": [ " checkIfClosed(\"getShort\");" ], "header": "@@ -1294,6 +1305,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " checkIfClosed(\"getInt\");" ], "header": "@@ -1305,6 +1317,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " checkIfClosed(\"getLong\");" ], "header": "@@ -1316,6 +1329,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " checkIfClosed(\"getFloat\");" ], "header": "@@ -1327,6 +1341,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " checkIfClosed(\"getDouble\");" ], "header": "@@ -1338,6 +1353,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " checkIfClosed(\"getBytes\");" ], "header": "@@ -1350,6 +1366,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " checkIfClosed(\"getDate\");" ], "header": "@@ -1361,6 +1378,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " checkIfClosed(\"getTime\");" ], "header": "@@ -1372,6 +1390,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " checkIfClosed(\"getTimestamp\");" ], "header": "@@ -1383,6 +1402,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " checkIfClosed(\"getCharacterStream\");" ], "header": "@@ -1395,6 +1415,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " checkIfClosed(\"getAsciiStream\");" ], "header": "@@ -1415,6 +1436,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " checkIfClosed(\"getBinaryStream\");" ], "header": "@@ -1434,6 +1456,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " checkIfClosed(\"getObject\");" ], "header": "@@ -1606,7 +1629,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "" ] }, { "added": [ " checkIfClosed(\"getObject\");" ], "header": "@@ -1671,6 +1694,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [], "header": "@@ -1871,8 +1895,6 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\tcheckIfClosed(\"getRow\");", "" ] }, { "added": [], "header": "@@ -1982,7 +2004,6 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\tcheckIfClosed(\"setFetchDirection\");" ] }, { "added": [ "\t\tcheckIfClosed(\"updateNull\");" ], "header": "@@ -2961,6 +2982,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"updateBoolean\");" ], "header": "@@ -2982,6 +3004,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"updateByte\");" ], "header": "@@ -3003,6 +3026,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"updateShort\");" ], "header": "@@ -3024,6 +3048,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"updateInt\");" ], "header": "@@ -3045,6 +3070,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"updateLong\");" ], "header": "@@ -3066,6 +3092,7 @@ public abstract 
class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"updateFloat\");" ], "header": "@@ -3087,6 +3114,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"updateDouble\");" ], "header": "@@ -3108,6 +3136,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"updateString\");" ], "header": "@@ -3129,6 +3158,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"updateBytes\");" ], "header": "@@ -3150,6 +3180,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"updateDate\");" ], "header": "@@ -3172,6 +3203,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"updateTime\");" ], "header": "@@ -3194,6 +3226,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"updateTimestamp\");" ], "header": "@@ -3216,6 +3249,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"updateAsciiStream\");" ], "header": "@@ -3240,6 +3274,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"updateBinaryStream\");" ], "header": "@@ -3264,6 +3299,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"updateCharacterStream\");" ], "header": "@@ -3288,6 +3324,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"updateObject\");" ], "header": "@@ -3310,6 +3347,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"updateObject\");" ], "header": "@@ -3331,6 +3369,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"getBlob\");" ], "header": "@@ -3849,6 +3888,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"getClob\");" ], "header": "@@ -3863,6 +3903,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"updateBlob\");" ], "header": "@@ -3912,6 +3953,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckIfClosed(\"updateClob\");" ], "header": "@@ -3966,6 +4008,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " /**", " * Checks if the result set has a scrollable cursor.", " *", " * @param methodName name of the method which requests the check", " * @exception SQLException if the result set is closed or its type", " * is <code>TYPE_FORWARD_ONLY</code>", " */", "\t\tcheckIfClosed(methodName);" ], "header": "@@ -4219,8 +4262,15 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "" ] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedResultSet20.java", "hunks": [ { "added": [ "\t\t\tcheckIfClosed(\"getBigDecimal\");" ], "header": "@@ -106,7 +106,7 @@ public class EmbedResultSet20", "removed": [ "" ] }, { "added": [ "\t\t\tcheckIfClosed(\"getBigDecimal\");" ], "header": "@@ -131,6 +131,7 @@ public class EmbedResultSet20", "removed": [] }, { 
"added": [ " checkIfClosed(\"getBigDecimal\");", " return getBigDecimal(findColumnName(columnName));", " }" ], "header": "@@ -161,8 +162,9 @@ public class EmbedResultSet20", "removed": [ " return getBigDecimal(findColumnName(columnName));", " }" ] }, { "added": [ " checkIfClosed(\"updateBigDecimal\");" ], "header": "@@ -220,6 +222,7 @@ public class EmbedResultSet20", "removed": [] }, { "added": [ " checkIfClosed(\"getObject\");" ], "header": "@@ -236,6 +239,7 @@ public class EmbedResultSet20", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedResultSet40.java", "hunks": [ { "added": [ " checkIfClosed(\"isWrapperFor\");" ], "header": "@@ -142,6 +142,7 @@ public class EmbedResultSet40 extends org.apache.derby.impl.jdbc.EmbedResultSet2", "removed": [] }, { "added": [ " checkIfClosed(\"unwrap\");" ], "header": "@@ -155,6 +156,7 @@ public class EmbedResultSet40 extends org.apache.derby.impl.jdbc.EmbedResultSet2", "removed": [] } ] } ]
derby-DERBY-1235-7eed9148
DERBY-1235: Move isPoolable() and setPoolable() from PreparedStatement to Statement Moved setPoolable() and isPoolable() from PreparedStatement to Statement and implemented the methods in BrokeredStatement. Contributed by Dyre Tjeldvoll. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@398940 13f79535-47bb-0310-9956-ffa450edef68
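The JDBC 4.0 contract behind this move is that a plain Statement defaults to not poolable while a PreparedStatement defaults to poolable; the patch expresses that with a protected flag in the base class that the PreparedStatement constructor flips, as the diff below shows. A minimal sketch of that arrangement (class names are illustrative):

    class StatementSketch {
        // JDBC 4.0 default: a plain Statement is not poolable.
        protected boolean isPoolable = false;

        public void setPoolable(boolean poolable) { isPoolable = poolable; }
        public boolean isPoolable() { return isPoolable; }
    }

    class PreparedStatementSketch extends StatementSketch {
        PreparedStatementSketch() {
            // JDBC 4.0 default: a PreparedStatement is poolable.
            isPoolable = true;
        }
    }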
[ { "file": "java/client/org/apache/derby/client/am/PreparedStatement.java", "hunks": [ { "added": [], "header": "@@ -53,10 +53,6 @@ public class PreparedStatement extends Statement", "removed": [ " // By default a PreparedStatement is poolable when it is created", " //required for jdbc4.0 methods", " private boolean isPoolable = true; ", "" ] }, { "added": [ " // PreparedStatement is poolable by default", " isPoolable = true;" ], "header": "@@ -120,6 +116,8 @@ public class PreparedStatement extends Statement", "removed": [] }, { "added": [ " int type, int concurrency, int holdability, ", " int autoGeneratedKeys, String[] columnNames) ", " throws SqlException {", " super(agent, connection, type, concurrency, holdability, ", " autoGeneratedKeys, columnNames);", " // PreparedStatement is poolable by default", " isPoolable = true;" ], "header": "@@ -145,8 +143,13 @@ public class PreparedStatement extends Statement", "removed": [ " int type, int concurrency, int holdability, int autoGeneratedKeys, String[] columnNames) throws SqlException {", " super(agent, connection, type, concurrency, holdability, autoGeneratedKeys, columnNames);" ] } ] }, { "file": "java/client/org/apache/derby/client/am/Statement.java", "hunks": [ { "added": [ " // A Statement is NOT poolable by default. The constructor for", " // PreparedStatement overrides this.", " protected boolean isPoolable = false; " ], "header": "@@ -191,6 +191,9 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredCallableStatement40.java", "hunks": [ { "added": [], "header": "@@ -175,16 +175,6 @@ public class BrokeredCallableStatement40 extends BrokeredCallableStatement30{", "removed": [ " public void setPoolable(boolean poolable)", " throws SQLException{", " getPreparedStatement().setPoolable(poolable);", " }", " ", " public boolean isPoolable()", " throws SQLException{", " return getPreparedStatement().isPoolable();", " }", " " ] } ] }, { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredPreparedStatement40.java", "hunks": [ { "added": [], "header": "@@ -69,16 +69,6 @@ public class BrokeredPreparedStatement40 extends BrokeredPreparedStatement30{", "removed": [ " public void setPoolable(boolean poolable)", " throws SQLException{", " throw Util.notImplemented();", " }", " ", " public boolean isPoolable()", " throws SQLException{", " throw Util.notImplemented();", " }", " " ] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedPreparedStatement.java", "hunks": [ { "added": [], "header": "@@ -98,10 +98,6 @@ public abstract class EmbedPreparedStatement", "removed": [ " // By default a PreparedStatement is poolable when it is created", " //required for jdbc4.0 methods ", " private boolean isPoolable = true;", "" ] }, { "added": [ "\t\t// PreparedStatement is poolable by default", "\t\tisPoolable = true;" ], "header": "@@ -115,6 +111,8 @@ public abstract class EmbedPreparedStatement", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedStatement.java", "hunks": [ { "added": [ "\t// An EmbedStatement is NOT poolable by default. The constructor for", "\t// PreparedStatement overrides this.", "\tprotected boolean isPoolable = false;", "" ], "header": "@@ -109,6 +109,10 @@ public class EmbedStatement extends ConnectionChild", "removed": [] } ] } ]
derby-DERBY-1236-6b858b2c
DERBY-1236: Commit Dyre's new tests for setSavepoint(), derby-1236.v1.diff. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@396889 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1238-97cf6942
DERBY-1238: Kristian's derby-1238-1a.diff patch, adding createStruct() and createArray(). I also wired Knut Anders' VerifySignatures test into the jdbc40 suite because now it passes cleanly. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@399063 13f79535-47bb-0310-9956-ffa450edef68
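The new methods are wired in as stubs that raise a "not implemented" error in all four connection classes, as the diff below shows. A sketch of what a caller can expect, using the standard SQLFeatureNotSupportedException as a stand-in for Derby's internal helpers (the method name follows the early JDBC 4.0 draft signature used in the diff):

    import java.sql.Array;
    import java.sql.SQLException;
    import java.sql.SQLFeatureNotSupportedException;

    class ConnectionStubSketch {
        public Array createArray(String typeName, Object[] elements)
                throws SQLException {
            // Derby's stubs throw a "not implemented" SQLException; the
            // standard JDBC exception type for that situation is used here.
            throw new SQLFeatureNotSupportedException("createArray(String,Object[])");
        }
    }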
[ { "file": "java/client/org/apache/derby/client/am/LogicalConnection40.java", "hunks": [ { "added": [ "import java.sql.Array;" ], "header": "@@ -20,6 +20,7 @@", "removed": [] }, { "added": [ "import java.sql.Struct;" ], "header": "@@ -27,6 +28,7 @@ import java.sql.Clob;", "removed": [] }, { "added": [ " public Array createArray(String typeName, Object[] elements)", " throws SQLException {", " throw SQLExceptionFactory.notImplemented(\"createArray(String,Object[])\");", " }", " " ], "header": "@@ -49,6 +51,11 @@ public class LogicalConnection40", "removed": [] } ] }, { "file": "java/client/org/apache/derby/client/net/NetConnection40.java", "hunks": [ { "added": [ "import java.sql.Array;" ], "header": "@@ -20,6 +20,7 @@", "removed": [] }, { "added": [ "import java.sql.Struct;" ], "header": "@@ -32,6 +33,7 @@ import java.sql.PreparedStatement;", "removed": [] }, { "added": [ " public Array createArray(String typeName, Object[] elements)", " throws SQLException {", " throw SQLExceptionFactory.notImplemented (\"createArray(String,Object[])\");", " }", "" ], "header": "@@ -89,6 +91,11 @@ public class NetConnection40 extends org.apache.derby.client.net.NetConnection", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredConnection40.java", "hunks": [ { "added": [ "import java.sql.Array;" ], "header": "@@ -20,6 +20,7 @@", "removed": [] }, { "added": [ "import java.sql.Struct;" ], "header": "@@ -27,6 +28,7 @@ import java.sql.ClientInfoException;", "removed": [] }, { "added": [ " public Array createArray(String typeName, Object[] elements)", " throws SQLException {", " throw Util.notImplemented();", " }", " " ], "header": "@@ -39,6 +41,11 @@ public class BrokeredConnection40 extends BrokeredConnection30 {", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedConnection40.java", "hunks": [ { "added": [ "import java.sql.Array;" ], "header": "@@ -20,6 +20,7 @@", "removed": [] }, { "added": [ "import java.sql.Struct;" ], "header": "@@ -28,6 +29,7 @@ import java.sql.NClob;", "removed": [] }, { "added": [ " public Array createArray(String typeName, Object[] elements)", " throws SQLException {", " throw Util.notImplemented();", " }" ], "header": "@@ -53,6 +55,10 @@ public class EmbedConnection40 extends EmbedConnection30 {", "removed": [] } ] } ]
derby-DERBY-1240-bdf6536c
DERBY-1240 (creating/restoring a db from backup using createFrom with the log at a different location also copies the log from the backup to the db dir). The problem was that the restore code in RawStore was copying every file that did not exist in the database directory after the restore had been completed by the data factory and log factory boot methods. Because the log is placed at a different location when an external log location is specified, the log directory does not exist in the database directory, so the code in RawStore was incorrectly copying the log files as well. This patch fixes the problem by restoring only the files that are not restored by the other factories; currently these are the jar files in the database and the backup history file. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@466221 13f79535-47bb-0310-9956-ffa450edef68
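A rough standalone sketch of the selection logic this message describes, using plain java.nio (directory and file names here are illustrative placeholders for Derby's constants): only the jar directory and the backup history file are copied, and only when the database does not already have them.

    import java.io.IOException;
    import java.nio.file.DirectoryStream;
    import java.nio.file.Files;
    import java.nio.file.Path;

    class RestoreRemainingSketch {
        // Copy the jar directory and the backup history file from the
        // backup, but only if the database does not already have them.
        static void restoreRemaining(Path backupDir, Path dbDir) throws IOException {
            Path backupJars = backupDir.resolve("jar");        // illustrative name
            Path dbJars = dbDir.resolve("jar");
            if (Files.notExists(dbJars) && Files.exists(backupJars)) {
                copyTree(backupJars, dbJars);
            }

            Path backupHistory = backupDir.resolve("BACKUP.HISTORY");
            Path dbHistory = dbDir.resolve("BACKUP.HISTORY");
            // In roll-forward recovery the database already holds the newest
            // history file, so an existing copy is never overwritten.
            if (Files.notExists(dbHistory) && Files.exists(backupHistory)) {
                Files.copy(backupHistory, dbHistory);
            }
        }

        private static void copyTree(Path from, Path to) throws IOException {
            Files.createDirectories(to);
            try (DirectoryStream<Path> entries = Files.newDirectoryStream(from)) {
                for (Path entry : entries) {
                    Path target = to.resolve(entry.getFileName());
                    if (Files.isDirectory(entry)) {
                        copyTree(entry, target);   // recurse into subdirectories
                    } else {
                        Files.copy(entry, target);
                    }
                }
            }
        }
    }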
[ { "file": "java/engine/org/apache/derby/impl/store/raw/RawStore.java", "hunks": [ { "added": [ "\t\t// if this is a restore from backup, restore the jar files." ], "header": "@@ -220,13 +220,10 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ "", "\t\t//save the service properties to a file if we are doing a restore from", "\t\t\t//copy the jar files.etc from backup if they don't exist", "\t\t\t((UpdateServiceProperties)properties).saveServiceProperties();" ] }, { "added": [ "\t\t// save the service properties to a file if we are doing a ", "\t\t// restore from. This marks the end of restore from backup.", "\t\tif (restoreFromBackup !=null)" ], "header": "@@ -264,16 +261,14 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ "\t\t//save the service properties to a file if we are doing a restore from", "\t\tif(restoreFromBackup !=null)", "\t\t\t//copy the jar files.etc from backup if they don't exist", "\t\t\trestoreRemainingFromBackup(restoreFromBackup);", "" ] }, { "added": [ "\t/*", "\t * Restore any remaining files from backup that are not ", "\t * restored by the individual factories. ", "\t * 1) copy jar files from backup..", "\t * 2) copy backup history file. ", "\t */", "\tprivate void restoreRemainingFromBackup(String backupPath) ", "\t\tthrows StandardException ", "\t", "\t\t// if they are any jar files in the backup copy, ", "\t\t// copy them into the database directory, if they", "\t\t// are not already there. ", "\t\tFile backupJarDir = new File(backupPath, ", "\t\t\t\t\t\t\t\t\t FileResource.JAR_DIRECTORY_NAME);", "", "\t\tStorageFile dbJarDir = ", "\t\t\tstorageFactory.newStorageFile(FileResource.JAR_DIRECTORY_NAME);", "\t\t", "\t\tif (!privExists(dbJarDir) && privExists(backupJarDir)) ", "\t\t{", "\t\t\tif (!privCopyDirectory(backupJarDir, dbJarDir)) {", "\t\t\t\tthrow StandardException.newException(", " backupJarDir, dbJarDir);", "", "\t\t// copy the backup history file from the backup. ", "\t\tStorageFile dbHistoryFile = ", "\t\t\tstorageFactory.newStorageFile(BACKUP_HISTORY);", "\t\tFile backupHistoryFile = new File(backupPath, BACKUP_HISTORY);", "\t", "\t\t// if this is a roll-forward recovery, backup history file ", "\t\t// will already there in the database and will be the latest ", "\t\t// copy; if it exists, do not copy from backup.", "\t\tif (!privExists(dbHistoryFile))", "\t\t\tif (!privCopyFile(backupHistoryFile, dbHistoryFile))", "\t\t\t\tthrow StandardException. 
", "\t\t\t\t\tnewException(SQLState.RAWSTORE_ERROR_COPYING_FILE,", "\t\t\t\t\t\t\t\t backupHistoryFile, dbHistoryFile); ", "" ], "header": "@@ -975,45 +970,51 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ "\t//copies the files from the backup that does not need", "\t//any special handling like jars.", "\tprivate void restoreRemainingFromBackup(String backupPath) throws StandardException", "\t\t/** ", "\t\t *copy the files from the backup except the ones that we already", "\t\t *copied in the boot methods(like log directory and data segments)", "\t\t *AND Service.properties file which we create last to", "\t\t *indicate the end of copy from backup.", "\t\t */", "", "\t\tFile backuploc = new File(backupPath);", "\t\tString[] fromList = privList(backuploc);", "\t\tfor(int i =0 ; i < fromList.length ; i++)", "\t\t{", "\t\t\tStorageFile toFile = storageFactory.newStorageFile( fromList[i]);", "\t\t\tif(privExists(toFile) || ", "\t\t\t fromList[i].equals(PersistentService.PROPERTIES_NAME)){", "\t\t\t\tcontinue;", "\t\t\t}", "\t\t\tFile fromFile = new File(backuploc, fromList[i]);", "\t\t\tif(privIsDirectory(fromFile))", "\t\t\t{", "\t\t\t\tif (!privCopyDirectory(fromFile, toFile)){", "\t\t\t\t\tthrow StandardException.newException(", " fromFile, toFile);", "\t\t\t\t}", "\t\t\t}else{", "\t\t\t\tif (!privCopyFile(fromFile, toFile)){", "\t\t\t\t\tthrow StandardException.newException(", " SQLState.UNABLE_TO_COPY_FILE_FROM_BACKUP,", " fromFile, toFile);", "\t\t\t\t}" ] } ] } ]
derby-DERBY-1247-e2f6e00a
DERBY-1247 - Make DerbyNetAutostart encoding safe git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@397300 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1248-e48d46e7
Improved the comments in some of the routines while debugging DERBY-1248. Separating the formatting/comment changes from the real fix. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@420892 13f79535-47bb-0310-9956-ffa450edef68
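One of the routines whose comments were reworked, getFirstLogNeeded() (see the diff below), reduces to a small rule: if a checkpoint exists, the oldest log file still needed is the one holding the checkpoint's undo low-water mark; otherwise every log file may still be needed. A simplified sketch of that rule, with the log-instant decoding replaced by an illustrative placeholder:

    class FirstLogNeededSketch {
        // Stand-in for Derby's checkpoint record; only the undo low-water
        // mark (an encoded log instant) matters for this calculation.
        static class Checkpoint {
            final long undoLWM;
            Checkpoint(long undoLWM) { this.undoLWM = undoLWM; }
        }

        // Returns the oldest log file number still needed by recovery,
        // or -1 when no checkpoint exists and all log files must be kept.
        static long firstLogNeeded(Checkpoint checkpoint) {
            return (checkpoint != null)
                    ? logFileNumberOf(checkpoint.undoLWM)
                    : -1;
        }

        // Placeholder for LogCounter.getLogFileNumber(): extracts the file
        // number from an encoded log instant (illustrative encoding only).
        static long logFileNumberOf(long logInstant) {
            return logInstant >>> 32;
        }
    }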
[ { "file": "java/engine/org/apache/derby/impl/store/raw/RawStore.java", "hunks": [ { "added": [ "\t * duration of the backup. Stable copy is made of each page using " ], "header": "@@ -677,7 +677,7 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ "\t * duration of the backup. Stable copy is made of each page using using " ] }, { "added": [ "\tpublic synchronized void backup(Transaction t, File backupDir) ", " // if backup dir does not exist, go ahead and create it.", "" ], "header": "@@ -685,12 +685,13 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ "\tpublic synchronized void backup(Transaction t, ", " File backupDir) " ] }, { "added": [ " // entity with backup name exists, make sure it is a directory.", "" ], "header": "@@ -700,6 +701,8 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [] }, { "added": [ " // is assumed to be a derby database directory because derby ", " // databases always have this file. ", " if (privExists(", " new File(backupDir, PersistentService.PROPERTIES_NAME))) ", " { " ], "header": "@@ -710,10 +713,12 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ " // is assumed as derby database directory because derby databases", " // always has this file. ", " if (privExists(new File(backupDir, PersistentService.PROPERTIES_NAME))) { " ] }, { "added": [ "\t\t\t// get name of the current db, ie. database directory of current db.", "\t\t\tStorageFile dbase = storageFactory.newStorageFile(null); ", " String canonicalDbName = storageFactory.getCanonicalName();", " int lastSep = ", " canonicalDbName.lastIndexOf(storageFactory.getSeparator());", "\t\t\tString dbname = ", " canonicalDbName.substring(lastSep + 1);", "\t\t\thistoryFile = ", " privFileWriter(", " storageFactory.newStorageFile(BACKUP_HISTORY), true);", "\t\t\tlogHistory(", " historyFile,", " MessageService.getTextMessage(", " MessageId.STORE_BACKUP_STARTED, ", " canonicalDbName, ", " getFilePath(backupcopy)));" ], "header": "@@ -732,22 +737,27 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ "\t\t\t// first figure out our name", "\t\t\tStorageFile dbase = storageFactory.newStorageFile( null); // The database directory", " String canonicalDbName = storageFactory.getCanonicalName();", " int lastSep = canonicalDbName.lastIndexOf( storageFactory.getSeparator());", "\t\t\tString dbname = canonicalDbName.substring( lastSep + 1);", "\t\t\thistoryFile = privFileWriter( storageFactory.newStorageFile( BACKUP_HISTORY), true);", "\t\t\tlogHistory(historyFile,", " MessageService.getTextMessage(", " MessageId.STORE_BACKUP_STARTED, ", " canonicalDbName, ", " getFilePath(backupcopy)));" ] }, { "added": [ "" ], "header": "@@ -792,6 +802,7 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [] }, { "added": [ " if (privExists(jarDir)) ", " {", " // then copy only the plain files under those directories. 
One ", " // could just use the recursive copy of directory to copy all ", " // the files under the jar dir, but the problem with that is if", " // a user gives jar directory as the backup path by mistake, ", " // copy will fail while copying the backup dir onto itself in ", " // recursion" ], "header": "@@ -803,14 +814,16 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ " if (privExists(jarDir)) {", " // then copy only the plain files under those directories. One could", " // just use the recursive copy of directory to copy all the files", " // under the jar dir, but the problem with that is if a user ", " // gives jar directory as the backup path by mistake, copy will ", " // fail while copying the backup dir onto itself in recursion" ] }, { "added": [ " File backupJarSchemaDir = ", " new File(backupJarDir, jarSchemaList[i]);", "", " (byte[])null, null, false)) ", " {" ], "header": "@@ -827,9 +840,12 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ " File backupJarSchemaDir = new File(backupJarDir, jarSchemaList[i]);", " (byte[])null, null, false)) {" ] }, { "added": [ " try ", " {", " PersistentService ps = ", " Monitor.getMonitor().getServiceType(this);", " Properties prop = ", " ps.getServiceProperties(fullName, (Properties)null);", "", " storageFactory.newStorageFile(", " LogFactory.LOG_DIRECTORY_NAME);", " {", " SanityManager.ASSERT(", " prop.getProperty(Attribute.LOG_DEVICE) == null,", " \"cannot get rid of logDevice property\");", " }", "", " ps.saveServiceProperties(backupcopy.getPath(), prop, false);", " }", " catch(StandardException se) ", " {", " logHistory(", " historyFile,", " MessageService.getTextMessage(", " MessageId.STORE_ERROR_EDIT_SERVICEPROPS) + se);", " // Attribute.CRYPTO_EXTERNAL_KEY_VERIFY_FILE, this file should be", " if (privExists(verifyKeyFile)) ", " {", " new File(", " backupcopy, Attribute.CRYPTO_EXTERNAL_KEY_VERIFY_FILE);", "", "\t\t\tFile logBackup = ", " new File(backupcopy, LogFactory.LOG_DIRECTORY_NAME);" ], "header": "@@ -844,55 +860,69 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ " try {", " ", " PersistentService ps = Monitor.getMonitor().getServiceType(this);", " Properties prop = ps.getServiceProperties(fullName, (Properties)null);", " storageFactory.newStorageFile( LogFactory.LOG_DIRECTORY_NAME);", " SanityManager.ASSERT(prop.getProperty(Attribute.LOG_DEVICE) == null,", " \"cannot get rid of logDevice property\");", " ps.saveServiceProperties( backupcopy.getPath(), prop, false);", " }catch(StandardException se) {", " logHistory(historyFile,", " MessageService.getTextMessage(", " MessageId.STORE_ERROR_EDIT_SERVICEPROPS)", " + se);", " // Attribute.CRYPTO_EXTERNAL_KEY_VERIFY_FILE , this file should be", " if (privExists(verifyKeyFile)) {", " new File(backupcopy, Attribute.CRYPTO_EXTERNAL_KEY_VERIFY_FILE);", "\t\t\tFile logBackup = new File(backupcopy, LogFactory.LOG_DIRECTORY_NAME);" ] }, { "added": [], "header": "@@ -924,7 +954,6 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup", "removed": [ "" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/log/LogToFile.java", "hunks": [ { "added": [ " /**", " * Return the \"oldest\" log file still needed by recovery. ", " * <p>", " * Returns the log file that contains the undoLWM, ie. 
the oldest", " * log record of all uncommitted transactions in the given checkpoint.", " * ", " * If no checkpoint is given then returns -1, indicating all log records", " * may be necessary.", " *", " **/", "\tprivate long getFirstLogNeeded(CheckpointOperation checkpoint)", " {", "\t\t\tfirstLogNeeded = ", " (checkpoint != null ? ", " LogCounter.getLogFileNumber(checkpoint.undoLWM()) : -1);", "\t\t\t\t\tSanityManager.DEBUG(DBG_FLAG, ", " \"truncatLog: undoLWM firstlog needed \" + firstLogNeeded);", " SanityManager.DEBUG(DBG_FLAG, ", " \"truncatLog: checkpoint truncationLWM firstlog needed \" +", " firstLogNeeded);", " SanityManager.DEBUG(DBG_FLAG, ", " \"truncatLog: firstLogFileNumber = \" + firstLogFileNumber);" ], "header": "@@ -2119,27 +2119,43 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [ "\tprivate long getFirstLogNeeded(CheckpointOperation checkpoint){", "", "\t\t\tfirstLogNeeded = (checkpoint != null ? LogCounter.getLogFileNumber(checkpoint.undoLWM()) : -1);", "\t\t\t\t\tSanityManager.DEBUG(DBG_FLAG, \"truncatLog: undoLWM firstlog needed \" + firstLogNeeded);", "\t\t\t\tSanityManager.DEBUG(DBG_FLAG, \"truncatLog: checkpoint truncationLWM firstlog needed \" + firstLogNeeded);", "\t\t\t\tSanityManager.DEBUG(DBG_FLAG, \"truncatLog: firstLogFileNumber = \" + firstLogFileNumber);" ] }, { "added": [ "\t * Start the transaction log backup. ", " *", " * The transaction log is required to bring the database to the consistent ", " * state on restore. ", " *", "\t * must be kept around until they are copied into the backup,", "\t * Copy the log control files to the backup (the checkpoint recorded in the", " * control files is the backup checkpoint). Restore will use the checkpoint ", "\t * the database to the consistent state. ", " *", " * Find first log file that needs to be copied into the backup to bring ", " * the database to the consistent state on restore. ", " * In the end, existing log files that are needed to recover from the backup", " * this call are also copied into the backup after all the information ", " * in the data containers is written to the backup, when endLogBackup() " ], "header": "@@ -4472,24 +4488,27 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [ "\t * start the transaction log backup, transaction log is is required", "\t * to bring the database to the consistent state on restore. ", "", "\t * should be kept around until they are copied into the backup,", "\t * copy the log control files to the backup (the checkpoint recorded in the", " * control files is the backup checkpoint), Restore will use the checkpoint ", "\t * the database to the consistent state. and find first log file ", "\t * that need to be copied into the backup to bring the database", "\t * to the consistent state on restore. 
", " * In the end, existing log files that are needed to recover from the backup ", " * this call are copied into the backup after all the all the information ", " * in the data containers is written to the backup, when endLogBackup() " ] }, { "added": [ "\t\t\t\tthrow StandardException.newException(", " SQLState.RAWSTORE_ERROR_COPYING_FILE, fromFile, toFile);" ], "header": "@@ -4528,8 +4547,8 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [ "\t\t\t\tthrow StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE,", "\t\t\t\t\t\t\t\t\t\t\t\t\t fromFile, toFile);" ] }, { "added": [ "\t\t\t\tthrow StandardException.newException(", " SQLState.RAWSTORE_ERROR_COPYING_FILE, fromFile, toFile);", "\t\t\t// find the first log file number that is active", "\t\t// copy all the log files that have to go into the backup ", "\t\tbackupLogFiles(toDir, getLogFileNumber() - 1);", " *", " * @param toDir - location to copy the log files to" ], "header": "@@ -4537,21 +4556,22 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [ "\t\t\t\tthrow StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE,", "\t\t\t\t\t\t\t\t\t\t\t\t\t fromFile, toFile);", "\t\t\t// find the first log file number that is active", "\t\t// copy all the log files that has to go into the backup ", "\t\tbackupLogFiles(toDir, getLogFileNumber()-1);", " * @param toDir - location where the log files should be copied to." ] }, { "added": [ "\t\t\t\tthrow StandardException.newException(", " SQLState.RAWSTORE_ERROR_COPYING_FILE, fromFile, toFile);", "\t * copy all the log files that have to go into the backup" ], "header": "@@ -4564,15 +4584,15 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [ "\t\t\t\tthrow StandardException.newException(SQLState.RAWSTORE_ERROR_COPYING_FILE,", "\t\t\t\t\t\t\t\t\t\t\t\t\t fromFile, toFile);", "\t * copy all the log files that has to go into the backup" ] }, { "added": [ "\t\t\t// when the log is being archived for roll-forward recovery", "\t\t\t// we would like to switch to a new log file.", "\t\t\t// otherwise during restore logfile in the backup could ", "\t\t\t// if we see a log file with fuzzy end, we think that is the ", "\t\t\tlastLogFileToBackup = getLogFileNumber() - 1;", "\t\t}", " else", "\t\t\t// for a plain online backup partial filled up log file is ok, " ], "header": "@@ -4583,19 +4603,20 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [ "\t\t\t// when the log is being archived for roll-frward recovery", "\t\t\t// we would like to switch to a new log file.", "\t\t\t// otherwise during restore logfile in the backup could ", "\t\t\t// if we see a log file with fuzzy end , we think that is the ", "\t\t\tlastLogFileToBackup = getLogFileNumber()-1 ;", "\t\t}else", "\t\t\t// for a plain online backup partiall filled up log file is ok, " ] } ] } ]
derby-DERBY-125-1adb9d7f
DERBY-125 Network Server can send a DSS greater than 32K to the client, which breaks the DRDA protocol. Fixes an off-by-one error during segment shifting. Fixes continuation flag handling so it does not overwrite the length. See changes.html attached to DERBY-125 for more info. Contributed by Bryan Pendleton. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@371603 13f79535-47bb-0310-9956-ffa450edef68
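The two code changes behind this fix are small: the start of the data being shifted was off by one (dataByte points at the last byte of a segment, so the segment starts at dataByte - dataToShift + 1), and the continuation flag must be OR'ed into the two-byte DSS header rather than assigned over it, otherwise the segment length stored in those same bytes is lost. A simplified sketch of the header handling (the constant value is illustrative, not taken from DssConstants):

    class DssHeaderSketch {
        // Illustrative continuation bit for a two-byte DSS segment header;
        // the real value lives in Derby's DssConstants.
        static final int CONTINUATION_BIT = 0x8000;

        // Build the header for one continuation segment. segmentLength is
        // the segment size including the two header bytes; every segment
        // except the last also carries the continuation flag.
        static int segmentHeader(int segmentLength, boolean lastSegment) {
            int header = segmentLength;         // length occupies the header bits
            if (!lastSegment) {
                header |= CONTINUATION_BIT;     // OR, do not overwrite the length
            }
            return header;
        }
    }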
[ { "file": "java/drda/org/apache/derby/impl/drda/DDMWriter.java", "hunks": [ { "added": [ "\t\t\t// Notes on the behavior of the Layer B segmenting loop below:", "\t\t\t//", "\t\t\t// We start with the right most chunk. For a 3-segment object we'd", "\t\t\t// shift 2 segments: shift the first (rightmost) one 4 bytes and ", "\t\t\t// the second one 2. Note that by 'first' we mean 'first time", "\t\t\t// through the loop', but that is actually the last segment", "\t\t\t// of data since we are moving right-to-left. For an object", "\t\t\t// of K segments we will pass through this loop K-1 times.", "\t\t\t// The 0th (leftmost) segment is not shifted, as it is", "\t\t\t// already in the right place. When we are done, we will", "\t\t\t// have made room in each segment for an additional", "\t\t\t// 2 bytes for the continuation header. Thus, each", "\t\t\t// segment K is shifted K*2 bytes to the right.", "\t\t\t//", "\t\t\t// Each time through the loop, \"dataByte\" points to the", "\t\t\t// last byte in the segment; \"dataToShift\" is the amount of", "\t\t\t// data that we need to shift, and \"shiftSize\" is the", "\t\t\t// distance that we need to shift it. Since dataByte points", "\t\t\t// at the last byte, not one byte beyond it (as with the", "\t\t\t// \"offset\" variable used elsewhere in DDMWriter), the start", "\t\t\t// of the segement is actually at (dataByte-dataToShift+1).", "\t\t\t//", "\t\t\t// After we have shifted the segment, we move back to the", "\t\t\t// start of the segment and set the value of the 2-byte DSS", "\t\t\t// continuation header, which needs to hold the length of", "\t\t\t// this segment's data, together with the continuation flag", "\t\t\t// if this is not the rightmost (passOne) segment.", "\t\t\t//", "\t\t\t// In general, each segment except the rightmost will contain", "\t\t\t// 32765 bytes of data, plus the 2-byte header, and its", "\t\t\t// continuation flag will be set, so the header value will", "\t\t\t// be 0xFFFF. The rightmost segment will not have the", "\t\t\t// continuation flag set, so its value may be anything from", "\t\t\t// 0x0001 to 0x7FFF, depending on the amount of data in that", "\t\t\t// segment.", "\t\t\t//", "\t\t\t// Note that the 0th (leftmost) segment also has a 2-byte", "\t\t\t// DSS header, which needs to have its continuation flag set.", "\t\t\t// This is done by resetting the \"totalSize\" variable below,", "\t\t\t// at which point that variable no longer holds the total size", "\t\t\t// of the object, but rather just the length of segment 0. The", "\t\t\t// total size of the object was written using extended length", "\t\t\t// bytes by the endDdm() method earlier.", "\t\t\t//", "\t\t\t// Additional information about this routine is available in the", "\t\t\t// bug notes for DERBY-125:", "\t\t\t// http://issues.apache.org/jira/browse/DERBY-125", "\t\t\t" ], "header": "@@ -1433,6 +1433,54 @@ class DDMWriter", "removed": [] }, { "added": [ "\t\t\t\tint startOfCopyData = dataByte - dataToShift + 1;" ], "header": "@@ -1441,12 +1489,7 @@ class DDMWriter", "removed": [ "\t\t\t\t// We start with the right most chunk. 
If we had to copy two", "\t\t\t\t// chunks we would shift the first one 4 bytes and then ", "\t\t\t\t// the second one", "\t\t\t\t// 2 when we come back on the next loop so they would each have", "\t\t\t\t// 2 bytes for the continuation header", "\t\t\t\tint startOfCopyData = dataByte - dataToShift;" ] }, { "added": [ "\t\t\t\t\ttwoByteContDssHeader = (twoByteContDssHeader |", "\t\t\t\t\t\tDssConstants.CONTINUATION_BIT);", "" ], "header": "@@ -1462,7 +1505,9 @@ class DDMWriter", "removed": [ "\t\t\t\t \ttwoByteContDssHeader = DssConstants.CONTINUATION_BIT;" ] } ] } ]
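Editorial note: the two fixes named in the DERBY-125 message are visible in the diff above (startOfCopyData = dataByte - dataToShift + 1, and OR-ing the continuation bit into the header). The miniature sketch below restates them outside DDMWriter; the constant value is an assumption mirroring DssConstants.

    // A DSS continuation header is two bytes: high bit = continuation flag,
    // remaining bits = segment length.
    public class DssHeaderSketch {
        static final int CONTINUATION_BIT = 0x8000; // assumed value for illustration

        // Buggy: assigning the flag discards the length already stored in the header.
        static int setContinuationBuggy(int twoByteContDssHeader) {
            return CONTINUATION_BIT;
        }

        // Fixed: OR the flag in so the length survives.
        static int setContinuationFixed(int twoByteContDssHeader) {
            return twoByteContDssHeader | CONTINUATION_BIT;
        }

        // Off-by-one: "dataByte" points AT the last byte of a segment, so the first
        // byte of the segment is dataByte - dataToShift + 1, not dataByte - dataToShift.
        static int startOfCopyData(int dataByte, int dataToShift) {
            return dataByte - dataToShift + 1;
        }
    }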
derby-DERBY-1251-a1c5d5f0
DERBY-1251: cancelRowUpdates() affects rows updated with updateRow() in scrollable updatable resultsets Fix contributed by Andreas Korneliussen. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@406279 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/ResultSet.java", "hunks": [ { "added": [], "header": "@@ -160,8 +160,6 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " // This variable helps keep track of whether cancelRowUpdates() should have any effect.", " protected boolean updateRowCalled_ = false;" ] }, { "added": [ " // cancelRowUpdates if updateCount_ != 1" ], "header": "@@ -3552,7 +3550,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " // cancelRowUpdates if updateCount_ != 1, else set updateRowCalled_ to true." ] }, { "added": [ " } finally {", " resetUpdatedColumns();", " // Ensure the data is reset", " if (preparedStatementForUpdate_.updateCount_ > 0) {", " // This causes a round-trip", " getAbsoluteRowset(absolutePosition_);", " }" ], "header": "@@ -3561,19 +3559,16 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateRowCalled_ = true;", " } catch (SqlException e) {", " try {", " cancelRowUpdates();", " } catch ( SQLException se ) {", " throw new SqlException(se);", " }", " throw e;", " // other result set types don't implement detectability", " cursor_.setIsRowUpdated(true);" ] }, { "added": [ " if (preparedStatementForDelete_.updateCount_ > 0) {", " ", " cursor_.isUpdateDeleteHoleCache_.set((int) currentRowInRowset_,", " Cursor.ROW_IS_NULL);", " cursor_.isUpdateDeleteHole_ = ", " ((Boolean) cursor_.isUpdateDeleteHoleCache_.", " get((int) currentRowInRowset_)).booleanValue();", " }" ], "header": "@@ -3624,12 +3619,17 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " Boolean nullIndicator = Cursor.ROW_IS_NULL;", " cursor_.isUpdateDeleteHoleCache_.set((int) currentRowInRowset_, nullIndicator);", " cursor_.isUpdateDeleteHole_ = ((Boolean) cursor_.isUpdateDeleteHoleCache_.get((int) currentRowInRowset_)).booleanValue();" ] }, { "added": [ " // Reset updated columns", " resetUpdatedColumns();" ], "header": "@@ -3690,12 +3690,8 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ "", " // if updateRow() has already been called, then cancelRowUpdates should have", " // no effect. updateRowCalled_ is reset to false as soon as the cursor moves to a new row.", " if (!updateRowCalled_) {", " resetUpdatedColumns();", " }" ] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedResultSet.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.execute.ExecutionFactory;" ], "header": "@@ -29,6 +29,7 @@ import org.apache.derby.iapi.sql.conn.StatementContext;", "removed": [] }, { "added": [ "\t/** ", "\t * The currentRow contains the data of the current row of the resultset.", "\t * If the containing row array is null, the cursor is not postioned on a ", "\t * row ", "\t */", "\tprivate final ExecRow currentRow;\t" ], "header": "@@ -97,10 +98,12 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t// mutable state", "\tprotected ExecRow currentRow;", "\t//deleteRow & updateRow make rowData null so that ResultSet is not positioned on deleted/updated row.", "\tprivate DataValueDescriptor[] rowData;" ] }, { "added": [], "header": "@@ -114,8 +117,6 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\tprivate ExecRow currentRowBeforeInsert;", "\tprivate ExecRow insertRow = null;" ] }, { "added": [ "\t/* updateRow is used to keep the values which are updated with updateXXX() ", "\t * calls. It is used by both insertRow() and updateRow(). 
", "\t * It is initialized to null if the resultset is not updatable. ", "\t */", "\tprivate final ExecRow updateRow;", "\t", "\t/* These are the columns which have been updated so far. ", "\t */", "\tprivate boolean[] columnGotUpdated; " ], "header": "@@ -180,10 +181,15 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t//copyOfDatabaseRow will keep the original contents of the columns of the current row which got updated.", "\t//These will be used if user decides to cancel the changes made to the row using cancelRowUpdates.", "\tprivate DataValueDescriptor[] copyOfDatabaseRow;", "\tprivate boolean[] columnGotUpdated; //these are the columns which have been updated so far. Used to build UPDATE...WHERE CURRENT OF sql" ] }, { "added": [ "\t\tfinal ExecutionFactory factory = conn.getLanguageConnection().", "\t\t\tgetLanguageConnectionFactory().getExecutionFactory();", "\t\tfinal int columnCount = getMetaData().getColumnCount();", "\t\tthis.currentRow = factory.getValueRow(columnCount);", "\t\tcurrentRow.setRowArray(null);", "\t\t\t//initialize arrays related to updateRow implementation", "\t\t\tcolumnGotUpdated = new boolean[columnCount];", "\t\t\tupdateRow = factory.getValueRow(columnCount);", "\t\t\tfor (int i = 1; i <= columnCount; i++) {", "\t\t\t\tupdateRow.setColumn(i, resultDescription.getColumnDescriptor(i).", "\t\t\t\t\t\t\t\t\tgetType().getNull());", "\t\t\t}", "\t\t\tinitializeUpdateRowModifiers();", "\t\t} else {", "\t\t\tupdateRow = null;" ], "header": "@@ -240,14 +246,26 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\t //initialize arrays related to updateRow implementation", "\t\t columnGotUpdated = new boolean[getMetaData().getColumnCount()];", "\t\t copyOfDatabaseRow = new DataValueDescriptor[columnGotUpdated.length];" ] }, { "added": [ "\t// checkOnRow protects us from making requests of", "\tprotected final void checkOnRow() throws SQLException ", "\t{", "\t\tif (currentRow.getRowArray() == null) {", "\t\t} ", "\t}", "\t/**", "\t * Initializes the currentRowHasBeenUpdated and columnGotUpdated fields", "\t */", "\tprivate void initializeUpdateRowModifiers() {", "\t\tcurrentRowHasBeenUpdated = false;", "\t\tArrays.fill(columnGotUpdated, false);" ], "header": "@@ -286,17 +304,22 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t// onRow protects us from making requests of", "\tprotected final DataValueDescriptor[] checkOnRow() throws SQLException\t{", "", "\t\tDataValueDescriptor[] theCurrentRow = rowData;", "", "\t\tif (theCurrentRow == null)", "\t\treturn theCurrentRow;" ] }, { "added": [ "\t\tif (!isOnInsertRow) checkOnRow(); // first make sure there's a row" ], "header": "@@ -305,7 +328,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\tcheckOnRow(); // first make sure there's a row" ] }, { "added": [], "header": "@@ -346,14 +369,6 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "", " if (columnGotUpdated != null)", " {", "\t //since we are moving off of the current row, need to initialize state corresponding to updateRow implementation", "\t Arrays.fill(columnGotUpdated, false);", "\t currentRowHasBeenUpdated = false;", " }", "" ] }, { "added": [ "\t\t\t\tfinal ExecRow newRow;" ], "header": "@@ -384,6 +399,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\t\t\t\t\tnewRow = theResults.setBeforeFirstRow();", "\t\t\t\t\t\tnewRow = theResults.getFirstRow();", 
"\t\t\t\t\t\tnewRow = theResults.getNextRow();", "\t\t\t\t\t\tnewRow = theResults.getLastRow();", "\t\t\t\t\t\tnewRow = theResults.setAfterLastRow();", "\t\t\t\t\t\tnewRow = theResults.getPreviousRow();", "\t\t\t\t\t\tnewRow = theResults.getAbsoluteRow(row);", "\t\t\t\t\t\tnewRow = theResults.getRelativeRow(row);", "\t\t\t\t\t\tnewRow = null;" ], "header": "@@ -401,38 +417,39 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\t\t\t\t\tcurrentRow = theResults.setBeforeFirstRow();", "\t\t\t\t\t\tcurrentRow = theResults.getFirstRow();", "\t\t\t\t\t\tcurrentRow = theResults.getNextRow();", "\t\t\t\t\t\tcurrentRow = theResults.getLastRow();", "\t\t\t\t\t\tcurrentRow = theResults.setAfterLastRow();", "\t\t\t\t\t\tcurrentRow = theResults.getPreviousRow();", "\t\t\t\t\t\tcurrentRow = theResults.getAbsoluteRow(row);", "\t\t\t\t\t\tcurrentRow = theResults.getRelativeRow(row);" ] }, { "added": [ "\t\t\t", "\t\t\tboolean onRow = (newRow!=null);", "\t\t\tif (onRow) {", "\t\t\t\tcurrentRow.setRowArray(newRow.getRowArray());", "\t\t\t} else {", "\t\t\t\tcurrentRow.setRowArray(null);", "\t\t\t}", "\t\t\t" ], "header": "@@ -458,8 +475,14 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "", "\t\t boolean onRow = (currentRow!=null);" ] }, { "added": [ "\t\t\tif (columnGotUpdated != null && currentRowHasBeenUpdated) {", "\t\t\t\tinitializeUpdateRowModifiers();", "\t\t\t}" ], "header": "@@ -496,11 +519,12 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\t\trowData = onRow ? currentRow.getRowArray() : null;", "\t\t\t" ] }, { "added": [ "\t\t\tcurrentRow.setRowArray(null);" ], "header": "@@ -595,8 +619,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\t\tcurrentRow = null;", "\t\t\trowData = null;" ] }, { "added": [ " if (!isOnInsertRow) checkOnRow(); // make sure there's a current row" ], "header": "@@ -2227,7 +2250,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ " checkOnRow(); // first make sure there's a current row" ] }, { "added": [ " currentRowHasBeenUpdated = true;", " ", " return updateRow.getColumn(columnIndex);" ], "header": "@@ -2240,14 +2263,10 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ " if (columnGotUpdated[columnIndex-1] == false) {//this is the first updateXXX call on this column", " //this is the first updateXXX method call on this column. 
Save the original content of the column into copyOfDatabaseRow", " //The saved copy of the column will be needed if cancelRowUpdates is issued", " copyOfDatabaseRow[columnIndex - 1] = currentRow.getColumn(columnIndex).getClone();", " }", "\t currentRowHasBeenUpdated = true;", " return currentRow.getColumn(columnIndex);" ] }, { "added": [ "\t\t\t\tDataValueDescriptor value = updateRow.getColumn(columnIndex);" ], "header": "@@ -2850,7 +2869,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\t\t\tDataValueDescriptor value = currentRow.getColumn(columnIndex);" ] }, { "added": [ " setValue(updateRow.getColumn(i));" ], "header": "@@ -3456,7 +3475,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ " setValue(currentRow.getColumn(i));" ] }, { "added": [ " act.getParameterValueSet().getParameterForSet(paramPosition++).setValue(updateRow.getColumn(i));" ], "header": "@@ -3530,7 +3549,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ " act.getParameterValueSet().getParameterForSet(paramPosition++).setValue(currentRow.getColumn(i));" ] }, { "added": [ " currentRow.setRowArray(null);", " } else {", " movePosition(RELATIVE, 0, \"relative\");" ], "header": "@@ -3542,8 +3561,9 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ " rowData = null;", " currentRow = null;" ] }, { "added": [ " initializeUpdateRowModifiers();" ], "header": "@@ -3552,6 +3572,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " currentRow.setRowArray(null);", " initializeUpdateRowModifiers();" ], "header": "@@ -3600,13 +3621,13 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ " rowData = null;", " currentRow = null;" ] }, { "added": [ " initializeUpdateRowModifiers(); ", " }" ], "header": "@@ -3663,17 +3684,9 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ " ", " if (currentRowHasBeenUpdated == false) return; //nothing got updated on this row so cancelRowUpdates is a no-op in this case.", " for (int i=0; i < columnGotUpdated.length; i++){", " if (columnGotUpdated[i] == true) currentRow.setColumn(i+1, copyOfDatabaseRow[i]);//if column got updated, resotre the original data", " columnGotUpdated[i] = false;", " }", " currentRowHasBeenUpdated = false;", " //rowData needs to be refreshed with the currentRow otherwise it will continue to have changes made by updateXXX methods", " rowData = currentRow.getRowArray();", " }" ] }, { "added": [ "\t\t\t\tinitializeUpdateRowModifiers();", " \t\t\t\tisOnInsertRow = true;", "\t\t\t\t", "\t\t\t\t\tupdateRow.setColumn(i, " ], "header": "@@ -3703,30 +3716,13 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\t\t\tfor (int i=0; i < columnGotUpdated.length; i++) {", "\t\t\t\t\tcolumnGotUpdated[i] = false;", "\t\t\t\t}", "\t\t\t\tcurrentRowHasBeenUpdated = false;", "", "\t\t\t\t// Remember position", "\t\t\t\tif (!isOnInsertRow) {", "\t\t\t\t\tcurrentRowBeforeInsert = currentRow;", "\t\t\t\t}", "", "\t\t\t\tisOnInsertRow = true;", "", "\t\t\t\t// If insertRow has not been allocated yet, get new insertRow", "\t\t\t\tif (insertRow == null) {", "\t\t\t\t\tinsertRow = stmt.lcc.getExecutionContext().", "\t\t\t\t\t\tgetExecutionFactory().getValueRow(columnGotUpdated.length);", "\t\t\t\t}", "\t\t\t\t\tinsertRow.setColumn(i, ", "\t\t\t\t// Set currentRow to insertRow", "\t\t\t\tcurrentRow = insertRow;", "\t\t\t\trowData = currentRow.getRowArray();" ] 
}, { "added": [ "\t\t\t\t\tinitializeUpdateRowModifiers();" ], "header": "@@ -3753,20 +3749,8 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\t\t\t\t// Get position previous to moveToInsertRow", "\t\t\t\t\tcurrentRow = currentRowBeforeInsert;", "\t\t\t\t\tcurrentRowBeforeInsert = null;", "", "\t\t\t\t\tfor (int i=0; i < columnGotUpdated.length; i++) {", "\t\t\t\t\t\tcolumnGotUpdated[i] = false;", "\t\t\t\t\t}", "\t\t\t\t\tcurrentRowHasBeenUpdated = false;", "", "\t\t\t\t\t// Get rowData", "\t\t\t\t\tif (currentRow != null) {", "\t\t\t\t\t\trowData = currentRow.getRowArray();", "\t\t\t\t\t}" ] }, { "added": [ "\t\t\t\tDataValueDescriptor dvd = getColumn(columnIndex);" ], "header": "@@ -3803,7 +3787,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\t\t\tDataValueDescriptor dvd = currentRow.getColumn(columnIndex);" ] }, { "added": [ "\t\t\t\tDataValueDescriptor dvd = getColumn(columnIndex);" ], "header": "@@ -3855,7 +3839,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\t\t\tDataValueDescriptor dvd = currentRow.getColumn(columnIndex);" ] } ] } ]
derby-DERBY-1253-32f71fea
DERBY-1253: Dyre's derby-1253.v1.diff patch, which filters out optional methods from the output of the UnsupportedVetter test. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@399072 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1253-3d901b8a
DERBY-1253: Make UnsupportedVetter test smart enough to handle methods which change shape. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@412220 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1253-6da3fc6a
DERBY-1253: Commit bug1253_forwarding.diff. This forwards some miscellaneous JDBC4 calls to competent objects. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@408772 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/jdbc/EmbeddedConnectionPoolDataSource40.java", "hunks": [ { "added": [ "import org.apache.derby.impl.jdbc.Util;", "import org.apache.derby.iapi.reference.SQLState;", "" ], "header": "@@ -26,6 +26,9 @@ import java.sql.SQLException;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/jdbc/EmbeddedXADataSource40.java", "hunks": [ { "added": [ "import org.apache.derby.impl.jdbc.Util;", "import org.apache.derby.iapi.reference.SQLState;", "" ], "header": "@@ -29,6 +29,9 @@ import java.sql.SQLException;", "removed": [] }, { "added": [ " /**", " * Returns false unless <code>interfaces</code> is implemented ", " * ", " * @param interfaces a Class defining an interface.", " * @return true if this implements the interface or ", " * directly or indirectly wraps an object ", " * that does.", " * @throws java.sql.SQLException if an error occurs while determining ", " * whether this is a wrapper for an object ", " * with the given interface.", " */", " public boolean isWrapperFor(Class<?> interfaces) throws SQLException {", " return interfaces.isInstance(this);", " }", " ", " /**", " * Returns <code>this</code> if this class implements the interface", " *", " * @param interfaces a Class defining an interface", " * @return an object that implements the interface", " * @throws java.sql.SQLExption if no object if found that implements the ", " * interface", " */", " public <T> T unwrap(java.lang.Class<T> interfaces) ", " throws SQLException{", " //Derby does not implement non-standard methods on ", " //JDBC objects", " //hence return this if this class implements the interface ", " //or throw an SQLException", " try {", " return interfaces.cast(this);", " } catch (ClassCastException cce) {", " throw Util.generateCsSQLException(SQLState.UNABLE_TO_UNWRAP,", " interfaces);", " }", " }", "\t" ], "header": "@@ -61,6 +64,43 @@ public class EmbeddedXADataSource40 extends EmbeddedXADataSource {", "removed": [] } ] } ]
derby-DERBY-1253-7a1db7b6
DERBY-1253: Check in bug1253_wireIn.diff. This wires the UnsupportedVetter test into the jdbc4 suite. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@410694 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1253-88a21095
DERBY-1253: Commit bug1253_verifier4_networkResultSet. This changes the NotSupported exception raised by network ResultSets to be one of the UnimplementedFeature exceptions which are mapped to SQLFeatureNotSupportedException under JDBC4. Also added more excludable methods to the UnsupportedVetter test. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@400172 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/functionTests/harness/Sed.java", "hunks": [ { "added": [ "", "\t\t\t// Timestamp diagnostic looks a little different under jdk16", "\t\t\tsearchStrings.addElement(\"\\\\[\\\\.fffffffff\\\\]\");\t\t\t" ], "header": "@@ -196,6 +196,9 @@ public class Sed", "removed": [] } ] } ]
derby-DERBY-1253-b0551db0
DERBY-1253: Changes to the unsupported-method-verifier based on my review of the revised, draft JDBC4 Compliance chapter. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@399482 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1255-8846963e
DERBY-1255: Narayanan's ClobBlob_create_v1.diff patch. This adds the first increment of support for Connection.createClob() and createBlob(), new methods added in JDBC4. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@407549 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredConnection40.java", "hunks": [ { "added": [ " /**", " *", " * Constructs an object that implements the <code>Blob</code> interface. The object", " * returned initially contains no data. The <code>setBinaryStream</code> and", " * <code>setBytes</code> methods of the <code>Blob</code> interface may be used to add data to", " * the <code>Blob</code>.", " *", " * @return An object that implements the <code>Blob</code> interface", " * @throws SQLException if an object that implements the", " * <code>Blob</code> interface can not be constructed, this method is ", " * called on a closed connection or a database access error occurs.", " *", " */", " if (isClosed()) {", " throw Util.noCurrentConnection();", " }", " // Forward the createBlob call to the physical connection", " try {", " return getRealConnection().createBlob();", " } catch (SQLException sqle) {", " notifyException(sqle);", " throw sqle;", " }", " /**", " *", " * Constructs an object that implements the <code>Clob</code> interface. The object", " * returned initially contains no data. The <code>setAsciiStream</code>,", " * <code>setCharacterStream</code> and <code>setString</code> methods of ", " * the <code>Clob</code> interface may be used to add data to the <code>Clob</code>.", " *", " * @return An object that implements the <code>Clob</code> interface", " * @throws SQLException if an object that implements the", " * <code>Clob</code> interface can not be constructed, this method is ", " * called on a closed connection or a database access error occurs.", " *", " */", " if (isClosed()) {", " throw Util.noCurrentConnection();", " }", " // Forward the createClob call to the physical connection", " try {", " return getRealConnection().createClob();", " } catch (SQLException sqle) {", " notifyException(sqle);", " throw sqle;", " }" ], "header": "@@ -46,12 +46,56 @@ public class BrokeredConnection40 extends BrokeredConnection30 {", "removed": [ " throw Util.notImplemented();", " throw Util.notImplemented();" ] } ] } ]
derby-DERBY-1258-79525626
DERBY-1258 Change the generated code for a new/old column in a row trigger to access columns by position and not name to avoid the case-insensitive name lookup specified by JDBC. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@397959 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/CreateTriggerNode.java", "hunks": [ { "added": [ "\t\t**\t\t\t\tgetObject(<colPosition>) AS DECIMAL(6,2))", " **", " ** Column position is used to avoid the wrong column being", " ** selected problem (DERBY-1258) caused by the case insensitive", " ** JDBC rules for fetching a column by name." ], "header": "@@ -587,7 +587,11 @@ public class CreateTriggerNode extends DDLStatementNode", "removed": [ "\t\t**\t\t\t\tgetObject('<colName>') AS DECIMAL(6,2))" ] }, { "added": [ "\t\tmethodCall.append(\"CAST (org.apache.derby.iapi.db.Factory::getTriggerExecutionContext().\");", "\t\tmethodCall.append(\".getObject(\");", " methodCall.append(colDesc.getPosition());", " methodCall.append(\") AS \");" ], "header": "@@ -599,9 +603,11 @@ public class CreateTriggerNode extends DDLStatementNode", "removed": [ "\t\tmethodCall.append(\"cast (org.apache.derby.iapi.db.Factory::getTriggerExecutionContext().\");", "\t\tmethodCall.append(\".getObject('\"+colName+\"') AS \");" ] } ] } ]
derby-DERBY-1259-e540aee4
DERBY-1007: Follow up patch to earlier submitted patch. In a word, the fix for this issue ensures that, in the case of subqueries, the optimizer will correctly propagate the estimated costs for subqueries up to the parent subquery(-ies), thus allowing the parent query to make a better decision about which join order is ultimately the best. As seen in the example scenario included above, the correct estimates are higher--sometimes much higher--than what the optimizer was returning prior to this change: in the example, the optimizer was returning an incorrect cost estimate of 10783 before the patch, and a correct estimate of 1 million after the patch (where "correct" means that it's the value calculated by the optimizer and thus the value that should be returned; I'm not saying anything about the accuracy of the estimate here). One side effect of this is that, for very deeply nested queries and/or queries with a high number of FROM tables/expressions, the higher cost estimates can be multiplied--sometimes many times over--throughout the optimization process, which means that the overall query estimate can climb to a much larger number much more quickly. If the query is big enough, this can actually cause the optimizer to reach an estimated cost of INFINITY. That said, the current optimizer logic for choosing a plan does not expect to see an estimate of infinity for its plans. As a result the optimizer does comparisons of, and arithmetic with, cost estimates and row counts that, when applied to Infinity, give unexpected results. I have filed DERBY-1259 and DERBY-1260 to address the "infinity problem" in more detail, but am attaching here a follow-up patch that takes some basic steps toward making the optimizer more robust in the face of infinite cost estimates, which are now more likely to occur given the DERBY-1007 changes. In particular, the d1007_followup_v1.patch does the following: 1) Fixes a couple of small problems with the handling of estimates for FromBaseTables, to ensure that a FromBaseTable's estimate is correctly propagated to (and handled by) the ProjectRestrictNode that sits above it. This parallels the original DERBY-1007 work but is a much simpler "follow-up" task as it deals only with base tables instead of subqueries, and thus the changes are fairly minor. 2) There are several places in OptimizerImpl where the optimizer will only choose to accept a plan's cost if the cost is less than the current "bestCost". If no best cost has been found yet, bestCost is set to an uninitialized value of Double.MAX_VALUE with the assumption that the first valid plan will have a cost less than Double.MAX_VALUE and thus will be chosen as the best so far. However, since a plan's cost estimate can actually end up being Double.POSITIVE_INFINITY, which is greater than Double.MAX_VALUE, it's possible that the optimizer will reject a valid join order because its cost is infinity, and then end up completing without ever finding a valid plan--which is wrong. What we want is for the optimizer to accept the first valid plan that it finds, regardless of what the cost is. Then if it later finds a better plan, it can use that. So in several places the d1007_followup_v1.patch adds a check to see if bestCost is uninitialized and, if so, we'll always accept the first valid join order we find, regardless of what its cost is--even if it's infinity--because that's better than no plan at all. 
3) Modifies the "compare" method in CostEstimateImpl.java to try to account for comparisons between two plans that both have infinite costs. If this happens, we don't have much choice but to guess as to which plan is actually better. So the changes for followup_v1 make that guess based on a comparison of row counts for the two plans. And if the row counts themselves are infinity, then we'll guess based on the single scan row counts. And finally, if those values are both infinity, as well, then we're out of luck and we just say that the two costs are "equal" for lack of better alternative. 4) And finally, due to unexpected behavior that results from arithmetic using infinity (see DERBY-1259), it is currently possible (though rather rare) for the optimizer to decide to do a hash join that has a cost estimate of Infinity. An example of a query for which this could happen can be found in DERBY-1205, query #1. That said, the BackingStoreHashtable that is used for carrying out a hash join currently creates a Java Hashtable instance with a capacity that matches the optimizer's estimated row count. So if the row count is infinity we'll try to create a Hashtable with some impossibly large capacity and, as a result, we'll end up with an OutOfMemory error. So the d1007_followup_v1.patch adds some code to handle this kind of situation in a more graceful manner. I ran derbyall with these changes on Linux Red Hat using ibm142 and saw no new failures. Submitted by Army Brown (qozinx@gmail.com) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@397675 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/store/access/BackingStoreHashtable.java", "hunks": [ { "added": [ " /* We want to create the hash table based on the estimated row", " * count if a) we have an estimated row count (i.e. it's greater", " * than zero) and b) we think we can create a hash table to", " * hold the estimated row count without running out of memory.", " * The check for \"b\" is required because, for deeply nested", " * queries and/or queries with a high number of tables in", " * their FROM lists, the optimizer can end up calculating", " * some very high row count estimates--even up to the point of", " * Double.POSITIVE_INFINITY (see DERBY-1259 for an explanation", " * of how that can happen). In that case any attempts to", " * create a Hashtable of size estimated_rowcnt can cause", " * OutOfMemory errors when we try to create the Hashtable.", " * So as a \"red flag\" for that kind of situation, we check to", " * see if the estimated row count is greater than the max", " * in-memory size for this table. Unit-wise this comparison", " * is relatively meaningless: rows vs bytes. But if our", " * estimated row count is greater than the max number of", " * in-memory bytes that we're allowed to consume, then", " * it's very likely that creating a Hashtable with a capacity", " * of estimated_rowcnt will lead to memory problems. So in", " * that particular case we leave hash_table null here and", " * initialize it further below, using the estimated in-memory", " * size of the first row to figure out what a reasonable size", " * for the Hashtable might be.", " */", " (((estimated_rowcnt <= 0) || (row_source == null)) ?", " new Hashtable() :", " (estimated_rowcnt < max_inmemory_size) ?", " new Hashtable((int) estimated_rowcnt) :", " null);" ], "header": "@@ -224,9 +224,37 @@ public class BackingStoreHashtable", "removed": [ " ((estimated_rowcnt <= 0) ? ", " new Hashtable() : new Hashtable((int) estimated_rowcnt));" ] }, { "added": [ " // If we haven't initialized the hash_table yet then that's", " // because a Hashtable with capacity estimated_rowcnt would", " // probably cause memory problems. So look at the first row", " // that we found and use that to create the hash table with", " // an initial capacity such that, if it was completely full,", " // it would still satisfy the max_inmemory condition. 
Note", " // that this isn't a hard limit--the hash table can grow if", " // needed.", " if (hash_table == null)", " {", "\t\t\t\t\t// Check to see how much memory we think the first row", " // is going to take, and then use that to set the initial", " // capacity of the Hashtable.", " double rowUsage = getEstimatedMemUsage(row);", " hash_table = new Hashtable((int)(max_inmemory_size / rowUsage));", " }" ], "header": "@@ -235,6 +263,22 @@ public class BackingStoreHashtable", "removed": [] }, { "added": [ " max_inmemory_size -= getEstimatedMemUsage(row);" ], "header": "@@ -387,13 +431,7 @@ public class BackingStoreHashtable", "removed": [ " for( int i = 0; i < row.length; i++)", " {", " if( row[i] instanceof DataValueDescriptor)", " max_inmemory_size -= ((DataValueDescriptor) row[i]).estimateMemoryUsage();", " max_inmemory_size -= ClassSize.refSize;", " }", " max_inmemory_size -= ClassSize.refSize;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/OptimizerImpl.java", "hunks": [ { "added": [ "\t\t\t\t**", "\t\t\t\t** For very deeply nested queries, it's possible that the optimizer", "\t\t\t\t** will return an estimated cost of Double.INFINITY, which is", "\t\t\t\t** greater than our uninitialized cost of Double.MAX_VALUE and", "\t\t\t\t** thus the \"compare\" check below will return false. So we have", "\t\t\t\t** to check to see if bestCost is uninitialized and, if so, we", "\t\t\t\t** save currentCost regardless of what value it is--because we", "\t\t\t\t** haven't found anything better yet.", "\t\t\t\t**", "\t\t\t\t** That said, it's also possible for bestCost to be infinity", "\t\t\t\t** AND for current cost to be infinity, as well. In that case", "\t\t\t\t** we can't really tell much by comparing the two, so for lack", "\t\t\t\t** of better alternative we look at the row counts. See", "\t\t\t\t** CostEstimateImpl.compare() for more.", "\t\t\t\tif ((! foundABestPlan) ||", "\t\t\t\t\t(currentCost.compare(bestCost) < 0) ||", "\t\t\t\t\tbestCost.isUninitialized())" ], "header": "@@ -1368,8 +1368,24 @@ public class OptimizerImpl implements Optimizer", "removed": [ "\t\t\t\tif ((! foundABestPlan) || currentCost.compare(bestCost) < 0)" ] }, { "added": [ "\t\t\t\t\t\tif ((currentSortAvoidanceCost.compare(bestCost) <= 0)", "\t\t\t\t\t\t\t|| bestCost.isUninitialized())" ], "header": "@@ -1414,7 +1430,8 @@ public class OptimizerImpl implements Optimizer", "removed": [ "\t\t\t\t\t\tif (currentSortAvoidanceCost.compare(bestCost) <= 0)" ] }, { "added": [ "\t\t// Before considering the cost, make sure we set the optimizable's", "\t\t// \"current\" cost to be the one that we found. Doing this allows", "\t\t// us to compare \"current\" with \"best\" later on to find out if", "\t\t// the \"current\" plan is also the \"best\" one this round--if it's", "\t\t// not then we'll have to revert back to whatever the best plan is.", "\t\t// That check is performed in getNextDecoratedPermutation() of", "\t\t// this class.", "\t\toptimizable.getCurrentAccessPath().setCostEstimate(estimatedCost);", "" ], "header": "@@ -1776,6 +1793,15 @@ public class OptimizerImpl implements Optimizer", "removed": [] }, { "added": [ "\t\t// RESOLVE: The following call to memoryUsageOK does not behave", "\t\t// correctly if outerCost.rowCount() is POSITIVE_INFINITY; see", "\t\t// DERBY-1259." 
], "header": "@@ -1783,6 +1809,9 @@ public class OptimizerImpl implements Optimizer", "removed": [] }, { "added": [ "\t\t\tbestCostEstimate.isUninitialized() ||" ], "header": "@@ -1797,6 +1826,7 @@ public class OptimizerImpl implements Optimizer", "removed": [] }, { "added": [ "\t\t\t\t\t\tbestCostEstimate.isUninitialized() ||" ], "header": "@@ -1844,6 +1874,7 @@ public class OptimizerImpl implements Optimizer", "removed": [] }, { "added": [ "", " // RESOLVE: The following call to memoryUsageOK does not behave", " // correctly if outerCost.rowCount() is POSITIVE_INFINITY; see", " // DERBY-1259." ], "header": "@@ -1912,6 +1943,10 @@ public class OptimizerImpl implements Optimizer", "removed": [] }, { "added": [ "\t\t\tbestCostEstimate.isUninitialized() ||" ], "header": "@@ -1935,6 +1970,7 @@ public class OptimizerImpl implements Optimizer", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ProjectRestrictNode.java", "hunks": [ { "added": [ "", "\t\t\t// Note: we don't call \"optimizer.considerCost()\" here because", "\t\t\t// a) the child will make that call as part of its own", "\t\t\t// \"optimizeIt()\" work above, and b) the child might have", "\t\t\t// different criteria for \"considering\" (i.e. rejecting or", "\t\t\t// accepting) a plan's cost than this ProjectRestrictNode does--", "\t\t\t// and we don't want to override the child's decision. So as", "\t\t\t// with most operations in this class, if the child is an", "\t\t\t// Optimizable, we just let it do its own work and make its", "\t\t\t// own decisions." ], "header": "@@ -324,7 +324,16 @@ public class ProjectRestrictNode extends SingleChildResultSetNode", "removed": [ "\t\t\toptimizer.considerCost(this, restrictionList, getCostEstimate(), outerCost);" ] } ] } ]
derby-DERBY-1260-e540aee4
DERBY-1007: Follow up patch to earlier submitted patch. In a word, the fix for this issue ensures that, in the case of subqueries, the optimizer will correctly propagate the estimated costs for subqueries up to the parent subquery(-ies), thus allowing the parent query to make a better decision about which join order is ultimately the best. As seen in the example scenario included above, the correct estimates are higher--sometimes much higher--than what the optimizer was returning prior to this change: in the example, the optimizer was returning an incorrect cost estimate of 10783 before the patch, and a correct estimate of 1 million after the patch (where "correct" means that it's the value calculated by the optimizer and thus the value that should be returned; I'm not saying anything about the accuracy of the estimate here). One side effect of this is that, for very deeply nested queries and/or queries with a high number of FROM tables/expressions, the higher cost estimates can be multiplied--sometimes many times over--throughout the optimization process, which means that the overall query estimate can climb to a much larger number much more quickly. If the query is big enough, this can actually cause the optimizer to reach an estimated cost of INFINITY. That said, the current optimizer logic for choosing a plan does not expect to see an estimate of infinity for its plans. As a result the optimizer does comparisons of, and arithmetic with, cost estimates and row counts that, when applied to Infinity, give unexpected results. I have filed DERBY-1259 and DERBY-1260 to address the "infinity problem" in more detail, but am attaching here a follow-up patch that takes some basic steps toward making the optimizer more robust in the face of infinite cost estimates, which are now more likely to occur given the DERBY-1007 changes. In particular, the d1007_followup_v1.patch does the following: 1) Fixes a couple of small problems with the handling of estimates for FromBaseTables, to ensure that a FromBaseTable's estimate is correctly propagated to (and handled by) the ProjectRestrictNode that sits above it. This parallels the original DERBY-1007 work but is a much simpler "follow-up" task as it deals only with base tables instead of subqueries, and thus the changes are fairly minor. 2) There are several places in OptimizerImpl where the optimizer will only choose to accept a plan's cost if the cost is less than the current "bestCost". If no best cost has been found yet, bestCost is set to an uninitialized value of Double.MAX_VALUE with the assumption that the first valid plan will have a cost less than Double.MAX_VALUE and thus will be chosen as the best so far. However, since a plan's cost estimate can actually end up being Double.POSITIVE_INFINITY, which is greater than Double.MAX_VALUE, it's possible that the optimizer will reject a valid join order because its cost is infinity, and then end up completing without ever finding a valid plan--which is wrong. What we want is for the optimizer to accept the first valid plan that it finds, regardless of what the cost is. Then if it later finds a better plan, it can use that. So in several places the d1007_followup_v1.patch adds a check to see if bestCost is uninitialized and, if so, we'll always accept the first valid join order we find, regardless of what its cost is--even if it's infinity--because that's better than no plan at all. 
3) Modifies the "compare" method in CostEstimateImpl.java to try to account for comparisons between two plans that both have infinite costs. If this happens, we don't have much choice but to guess as to which plan is actually better. So the changes for followup_v1 make that guess based on a comparison of row counts for the two plans. And if the row counts themselves are infinity, then we'll guess based on the single scan row counts. And finally, if those values are both infinity, as well, then we're out of luck and we just say that the two costs are "equal" for lack of better alternative. 4) And finally, due to unexpected behavior that results from arithmetic using infinity (see DERBY-1259), it is currently possible (though rather rare) for the optimizer to decide to do a hash join that has a cost estimate of Infinity. An example of a query for which this could happen can be found in DERBY-1205, query #1. That said, the BackingStoreHashtable that is used for carrying out a hash join currently creates a Java Hashtable instance with a capacity that matches the optimizer's estimated row count. So if the row count is infinity we'll try to create a Hashtable with some impossibly large capacity and, as a result, we'll end up with an OutOfMemory error. So the d1007_followup_v1.patch adds some code to handle this kind of situation in a more graceful manner. I ran derbyall with these changes on Linux Red Hat using ibm142 and saw no new failures. Submitted by Army Brown (qozinx@gmail.com) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@397675 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/store/access/BackingStoreHashtable.java", "hunks": [ { "added": [ " /* We want to create the hash table based on the estimated row", " * count if a) we have an estimated row count (i.e. it's greater", " * than zero) and b) we think we can create a hash table to", " * hold the estimated row count without running out of memory.", " * The check for \"b\" is required because, for deeply nested", " * queries and/or queries with a high number of tables in", " * their FROM lists, the optimizer can end up calculating", " * some very high row count estimates--even up to the point of", " * Double.POSITIVE_INFINITY (see DERBY-1259 for an explanation", " * of how that can happen). In that case any attempts to", " * create a Hashtable of size estimated_rowcnt can cause", " * OutOfMemory errors when we try to create the Hashtable.", " * So as a \"red flag\" for that kind of situation, we check to", " * see if the estimated row count is greater than the max", " * in-memory size for this table. Unit-wise this comparison", " * is relatively meaningless: rows vs bytes. But if our", " * estimated row count is greater than the max number of", " * in-memory bytes that we're allowed to consume, then", " * it's very likely that creating a Hashtable with a capacity", " * of estimated_rowcnt will lead to memory problems. So in", " * that particular case we leave hash_table null here and", " * initialize it further below, using the estimated in-memory", " * size of the first row to figure out what a reasonable size", " * for the Hashtable might be.", " */", " (((estimated_rowcnt <= 0) || (row_source == null)) ?", " new Hashtable() :", " (estimated_rowcnt < max_inmemory_size) ?", " new Hashtable((int) estimated_rowcnt) :", " null);" ], "header": "@@ -224,9 +224,37 @@ public class BackingStoreHashtable", "removed": [ " ((estimated_rowcnt <= 0) ? ", " new Hashtable() : new Hashtable((int) estimated_rowcnt));" ] }, { "added": [ " // If we haven't initialized the hash_table yet then that's", " // because a Hashtable with capacity estimated_rowcnt would", " // probably cause memory problems. So look at the first row", " // that we found and use that to create the hash table with", " // an initial capacity such that, if it was completely full,", " // it would still satisfy the max_inmemory condition. 
Note", " // that this isn't a hard limit--the hash table can grow if", " // needed.", " if (hash_table == null)", " {", "\t\t\t\t\t// Check to see how much memory we think the first row", " // is going to take, and then use that to set the initial", " // capacity of the Hashtable.", " double rowUsage = getEstimatedMemUsage(row);", " hash_table = new Hashtable((int)(max_inmemory_size / rowUsage));", " }" ], "header": "@@ -235,6 +263,22 @@ public class BackingStoreHashtable", "removed": [] }, { "added": [ " max_inmemory_size -= getEstimatedMemUsage(row);" ], "header": "@@ -387,13 +431,7 @@ public class BackingStoreHashtable", "removed": [ " for( int i = 0; i < row.length; i++)", " {", " if( row[i] instanceof DataValueDescriptor)", " max_inmemory_size -= ((DataValueDescriptor) row[i]).estimateMemoryUsage();", " max_inmemory_size -= ClassSize.refSize;", " }", " max_inmemory_size -= ClassSize.refSize;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/OptimizerImpl.java", "hunks": [ { "added": [ "\t\t\t\t**", "\t\t\t\t** For very deeply nested queries, it's possible that the optimizer", "\t\t\t\t** will return an estimated cost of Double.INFINITY, which is", "\t\t\t\t** greater than our uninitialized cost of Double.MAX_VALUE and", "\t\t\t\t** thus the \"compare\" check below will return false. So we have", "\t\t\t\t** to check to see if bestCost is uninitialized and, if so, we", "\t\t\t\t** save currentCost regardless of what value it is--because we", "\t\t\t\t** haven't found anything better yet.", "\t\t\t\t**", "\t\t\t\t** That said, it's also possible for bestCost to be infinity", "\t\t\t\t** AND for current cost to be infinity, as well. In that case", "\t\t\t\t** we can't really tell much by comparing the two, so for lack", "\t\t\t\t** of better alternative we look at the row counts. See", "\t\t\t\t** CostEstimateImpl.compare() for more.", "\t\t\t\tif ((! foundABestPlan) ||", "\t\t\t\t\t(currentCost.compare(bestCost) < 0) ||", "\t\t\t\t\tbestCost.isUninitialized())" ], "header": "@@ -1368,8 +1368,24 @@ public class OptimizerImpl implements Optimizer", "removed": [ "\t\t\t\tif ((! foundABestPlan) || currentCost.compare(bestCost) < 0)" ] }, { "added": [ "\t\t\t\t\t\tif ((currentSortAvoidanceCost.compare(bestCost) <= 0)", "\t\t\t\t\t\t\t|| bestCost.isUninitialized())" ], "header": "@@ -1414,7 +1430,8 @@ public class OptimizerImpl implements Optimizer", "removed": [ "\t\t\t\t\t\tif (currentSortAvoidanceCost.compare(bestCost) <= 0)" ] }, { "added": [ "\t\t// Before considering the cost, make sure we set the optimizable's", "\t\t// \"current\" cost to be the one that we found. Doing this allows", "\t\t// us to compare \"current\" with \"best\" later on to find out if", "\t\t// the \"current\" plan is also the \"best\" one this round--if it's", "\t\t// not then we'll have to revert back to whatever the best plan is.", "\t\t// That check is performed in getNextDecoratedPermutation() of", "\t\t// this class.", "\t\toptimizable.getCurrentAccessPath().setCostEstimate(estimatedCost);", "" ], "header": "@@ -1776,6 +1793,15 @@ public class OptimizerImpl implements Optimizer", "removed": [] }, { "added": [ "\t\t// RESOLVE: The following call to memoryUsageOK does not behave", "\t\t// correctly if outerCost.rowCount() is POSITIVE_INFINITY; see", "\t\t// DERBY-1259." 
], "header": "@@ -1783,6 +1809,9 @@ public class OptimizerImpl implements Optimizer", "removed": [] }, { "added": [ "\t\t\tbestCostEstimate.isUninitialized() ||" ], "header": "@@ -1797,6 +1826,7 @@ public class OptimizerImpl implements Optimizer", "removed": [] }, { "added": [ "\t\t\t\t\t\tbestCostEstimate.isUninitialized() ||" ], "header": "@@ -1844,6 +1874,7 @@ public class OptimizerImpl implements Optimizer", "removed": [] }, { "added": [ "", " // RESOLVE: The following call to memoryUsageOK does not behave", " // correctly if outerCost.rowCount() is POSITIVE_INFINITY; see", " // DERBY-1259." ], "header": "@@ -1912,6 +1943,10 @@ public class OptimizerImpl implements Optimizer", "removed": [] }, { "added": [ "\t\t\tbestCostEstimate.isUninitialized() ||" ], "header": "@@ -1935,6 +1970,7 @@ public class OptimizerImpl implements Optimizer", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ProjectRestrictNode.java", "hunks": [ { "added": [ "", "\t\t\t// Note: we don't call \"optimizer.considerCost()\" here because", "\t\t\t// a) the child will make that call as part of its own", "\t\t\t// \"optimizeIt()\" work above, and b) the child might have", "\t\t\t// different criteria for \"considering\" (i.e. rejecting or", "\t\t\t// accepting) a plan's cost than this ProjectRestrictNode does--", "\t\t\t// and we don't want to override the child's decision. So as", "\t\t\t// with most operations in this class, if the child is an", "\t\t\t// Optimizable, we just let it do its own work and make its", "\t\t\t// own decisions." ], "header": "@@ -324,7 +324,16 @@ public class ProjectRestrictNode extends SingleChildResultSetNode", "removed": [ "\t\t\toptimizer.considerCost(this, restrictionList, getCostEstimate(), outerCost);" ] } ] } ]
derby-DERBY-1262-baef65af
DERBY-1262: Like-predicates: % does not match tab character Patch contributed by Håvard Mork <havard.mork@gmail.com>. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@411174 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/types/Like.java", "hunks": [ { "added": [ "\tpublic static String greaterEqualStringFromParameter(String pattern, int maxWidth)", "\t\treturn greaterEqualString(pattern, (String) null, maxWidth);", "\tpublic static String greaterEqualStringFromParameterWithEsc(String pattern, String escape, int maxWidth)", "\t\treturn greaterEqualString(pattern, escape, maxWidth);" ], "header": "@@ -688,22 +688,22 @@ public class Like {", "removed": [ "\tpublic static String greaterEqualStringFromParameter(String pattern)", "\t\treturn greaterEqualString(pattern, (String) null);", "\tpublic static String greaterEqualStringFromParameterWithEsc(String pattern, String escape)", "\t\treturn greaterEqualString(pattern, escape);" ] }, { "added": [ "\t * @param maxWidth\tMaximum length of column, for null padding", "\tpublic static String greaterEqualString(String pattern, String escape, int maxWidth)" ], "header": "@@ -711,10 +711,11 @@ public class Like {", "removed": [ "\tpublic static String greaterEqualString(String pattern, String escape)" ] }, { "added": [ "\t\t\t\treturn padWithNulls(greaterEqualString(pattern, escChar), maxWidth);", "\t\t\tif (firstAnyString != -1) // no _, found %", "\t\t\t\tpattern = pattern.substring(0, firstAnyString);", "\t\t\tpattern = pattern.substring(0, firstAnyChar);", "\t\t\tpattern = pattern.substring(0, (firstAnyChar > firstAnyString) ? ", "\t\t\t\t\t\t\t\t\t\t\tfirstAnyString :", "\t\t\t\t\t\t\t\t\t\t\tfirstAnyChar);", "\t\treturn padWithNulls(pattern, maxWidth);" ], "header": "@@ -735,32 +736,29 @@ public class Like {", "removed": [ "\t\t\t\treturn greaterEqualString(pattern, escChar);", "\t\t\tif (firstAnyString == -1)", "\t\t\t{", "\t\t\t\treturn pattern;", "\t\t\t}", "\t\t\telse\t// no _, found %", "\t\t\t\treturn pattern.substring(0, firstAnyString);", "\t\t\treturn pattern.substring(0, firstAnyChar);", "\t\t\treturn pattern.substring(0, (firstAnyChar > firstAnyString) ? 
", "\t\t\t\t\t\t\t\t\t\tfirstAnyString :", "\t\t\t\t\t\t\t\t\t\tfirstAnyChar);" ] }, { "added": [ "\t * @param maxWidth\tMaximum length of column, for null padding", "\tpublic static String lessThanString(String pattern, int maxWidth)" ], "header": "@@ -867,11 +865,12 @@ public class Like {", "removed": [ "\tpublic static String lessThanString(String pattern)" ] }, { "added": [ "\t\treturn padWithNulls(new String(charArray), maxWidth);", "\tpublic static String lessThanStringFromParameter(String pattern, int maxWidth)", "\t\tthrows StandardException ", "\t{", "\t\treturn lessThanString(pattern, null, maxWidth);", "\tpublic static String lessThanStringFromParameterWithEsc(String pattern, String escape, int maxWidth)", "\t\tthrows StandardException", "\t\treturn lessThanString(pattern, escape, maxWidth);" ], "header": "@@ -902,21 +901,23 @@ public class Like {", "removed": [ "\t\treturn new String(charArray);", "\tpublic static String lessThanStringFromParameter(String pattern) throws StandardException {", "\t\treturn lessThanString(pattern, null);", "\tpublic static String lessThanStringFromParameterWithEsc(String pattern, String escape)", "\t\t throws StandardException", "\t\treturn lessThanString(pattern, escape);" ] }, { "added": [ "\t * @param maxWidth\tMaximum length of column, for null padding", "\tpublic static String lessThanString(String pattern, String escape, int maxWidth)" ], "header": "@@ -929,11 +930,12 @@ public class Like {", "removed": [ "\tpublic static String lessThanString(String pattern, String escape)" ] }, { "added": [ "\t\t\treturn padWithNulls(new String(charArray), maxWidth);" ], "header": "@@ -1033,7 +1035,7 @@ public class Like {", "removed": [ "\t\t\treturn new String(charArray);" ] }, { "added": [ "\t\treturn padWithNulls(gt, maxWidth);" ], "header": "@@ -1058,7 +1060,7 @@ public class Like {", "removed": [ "\t\treturn gt;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/LikeEscapeOperatorNode.java", "hunks": [ { "added": [ "\t\t\tint maxWidth = receiver.getTypeServices().getMaximumWidth();", "\t\t\tgreaterEqualString = Like.greaterEqualString(pattern, escape, ", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t maxWidth);" ], "header": "@@ -552,7 +552,9 @@ public final class LikeEscapeOperatorNode extends TernaryOperatorNode", "removed": [ "\t\t\tgreaterEqualString = Like.greaterEqualString(pattern, escape);" ] }, { "added": [ "\t\t\t\tlessThanString = Like.lessThanString(pattern, escape, maxWidth);" ], "header": "@@ -561,7 +563,7 @@ public final class LikeEscapeOperatorNode extends TernaryOperatorNode", "removed": [ "\t\t\t\tlessThanString = Like.lessThanString(greaterEqualString);" ] }, { "added": [ "\t\t\t\tint maxWidth = receiver.getTypeServices().getMaximumWidth();", "\t\t\t\t\t\t\t\t\"lessThanStringFromParameter\", maxWidth);" ], "header": "@@ -608,8 +610,9 @@ public final class LikeEscapeOperatorNode extends TernaryOperatorNode", "removed": [ "\t\t\t\t\t\t\t\t\"lessThanStringFromParameter\");" ] }, { "added": [ "\t\t\tint maxWidth = receiver.getTypeServices().getMaximumWidth();", "\t\t\t\t\t\t\t\t\"greaterEqualStringFromParameter\", maxWidth);" ], "header": "@@ -653,8 +656,9 @@ public final class LikeEscapeOperatorNode extends TernaryOperatorNode", "removed": [ "\t\t\t\t\t\t\t\t\t\"greaterEqualStringFromParameter\");" ] }, { "added": [ "\t\t\t\t\t\tValueNode escapeNode,String methodName, int maxWidth)" ], "header": "@@ -787,7 +791,7 @@ public final class LikeEscapeOperatorNode extends TernaryOperatorNode", "removed": [ "\t\t\t\t\t\tValueNode 
escapeNode,String methodName)" ] } ] } ]
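Editorial note: the idea behind the DERBY-1262 fix, restated with plain strings. When LIKE 'abc%' is turned into a range scan, the lower bound built from the fixed prefix is now padded with the lowest character (\u0000) out to the column's maximum width; with the previous, effectively blank-padded bound, values containing characters below space (such as tab) sorted below the bound and were missed. padWithNulls below is modelled on the helper the patch adds to Like.java; widths, patterns and the blank-padding simplification are assumptions.

    public class LikeBoundsSketch {
        static String pad(String s, char padChar, int maxWidth) {
            StringBuilder sb = new StringBuilder(s);
            while (sb.length() < maxWidth) {
                sb.append(padChar);
            }
            return sb.toString();
        }

        public static void main(String[] args) {
            String candidate = pad("abc\tx", ' ', 10);    // CHAR(10) value containing a tab

            // Blank-padded lower bound (old behaviour, simplified) excludes the row,
            // because tab (\u0009) sorts below space (\u0020):
            String oldBound = pad("abc", ' ', 10);
            System.out.println(candidate.compareTo(oldBound) >= 0);   // false

            // Null-padded lower bound (the fix) keeps the row in range:
            String newBound = pad("abc", '\u0000', 10);
            System.out.println(candidate.compareTo(newBound) >= 0);   // true
        }
    }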
derby-DERBY-1265-d04e05e1
DERBY-1265: Commit bug1265_01_sortMethods.diff. Sorts the result of Class.getMethods() to remove an indeterminacy in the test. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@398630 13f79535-47bb-0310-9956-ffa450edef68
[]
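Editorial note: the order of Class.getMethods() is unspecified, which is the indeterminacy DERBY-1265 removes from the test output. A minimal way to get a stable order is to sort the array before printing; the sort key used here (the full toString() signature) is an assumption, not necessarily the one the test uses.

    import java.lang.reflect.Method;
    import java.util.Arrays;
    import java.util.Comparator;

    public class SortedMethodsSketch {
        public static void main(String[] args) {
            Method[] methods = java.sql.Connection.class.getMethods();
            Arrays.sort(methods, Comparator.comparing(Method::toString));
            for (Method m : methods) {
                System.out.println(m);   // deterministic order across JVM runs
            }
        }
    }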
derby-DERBY-1272-66a8763a
DERBY-1272 (partial) Log sysinfo to derby.log with derby.stream.error.logSeverityLevel=0. Does not include a test at this time because of a test ordering problem. Contributed by Andrew McIntyre, Ramin Moazeni. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@592590 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/services/monitor/BaseMonitor.java", "hunks": [ { "added": [ " // DERBY-1272: Print sysinfo to log if derby.stream.error.logSeverityLevel=0", " int logSeverityLevel = PropertyUtil.getSystemInt(Property.LOG_SEVERITY_LEVEL,", " SanityManager.DEBUG ? 0 : ExceptionSeverity.SESSION_SEVERITY);", " if (logSeverityLevel == 0) {", " if (systemStreams != null) {", " systemStreams.stream().printlnWithHeader(\"\");", " org.apache.derby.tools.sysinfo.getInfo(systemStreams.stream().getPrintWriter());", " } else {", " org.apache.derby.tools.sysinfo.getInfo(getTempWriter());", " }", " }", "" ], "header": "@@ -1936,6 +1936,18 @@ nextModule:", "removed": [] } ] } ]
derby-DERBY-1272-7ca0b34c
DERBY-1272 Back out change 592590, which was causing an exception initializer error with classes. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@593654 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/services/monitor/BaseMonitor.java", "hunks": [ { "added": [], "header": "@@ -1936,18 +1936,6 @@ nextModule:", "removed": [ " // DERBY-1272: Print sysinfo to log if derby.stream.error.logSeverityLevel=0", " int logSeverityLevel = PropertyUtil.getSystemInt(Property.LOG_SEVERITY_LEVEL,", " SanityManager.DEBUG ? 0 : ExceptionSeverity.SESSION_SEVERITY);", " if (logSeverityLevel == 0) {", " if (systemStreams != null) {", " systemStreams.stream().printlnWithHeader(\"\");", " org.apache.derby.tools.sysinfo.getInfo(systemStreams.stream().getPrintWriter());", " } else {", " org.apache.derby.tools.sysinfo.getInfo(getTempWriter());", " }", " }", "" ] } ] } ]
derby-DERBY-1274-177992f5
DERBY-1274: Network Server does not shut down the databases it has booted when started and shut down from the command line. Fix submitted by Fernanda Pizzorno. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@421856 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/drda/org/apache/derby/impl/drda/NetworkServerControlImpl.java", "hunks": [ { "added": [ "\t// if the server is started from the command line, it should shutdown the", "\t// databases it has booted.", "\tprivate boolean shutdownDatabasesOnShutdown = false;" ], "header": "@@ -290,6 +290,9 @@ public final class NetworkServerControlImpl {", "removed": [] }, { "added": [ "\t\tif (shutdownDatabasesOnShutdown) {", "", "\t\t\t// Shutdown Cloudscape", "\t\t\ttry {", "\t\t\t\tif (cloudscapeDriver != null)", "\t\t\t\t\tcloudscapeDriver.connect(\"jdbc:derby:;shutdown=true\",", "\t\t\t\t\t\t\t\t\t\t\t (Properties) null);", "\t\t\t} catch (SQLException sqle) {", "\t\t\t\t// If we can't shutdown cloudscape. Perhaps authentication is", "\t\t\t\t// set to true or some other reason. We will just print a", "\t\t\t\t// message to the console and proceed.", "\t\t\t\tif (((EmbedSQLException)sqle).getMessageId() !=", "\t\t\t\t SQLState.CLOUDSCAPE_SYSTEM_SHUTDOWN)", "\t\t\t\t\tconsolePropertyMessage(\"DRDA_ShutdownWarning.I\",", "\t\t\t\t\t\t\t\t\t\t sqle.getMessage());", "\t\t\t}", "\t\t}" ], "header": "@@ -639,22 +642,23 @@ public final class NetworkServerControlImpl {", "removed": [ "\t\t/*", "\t\t// Shutdown Cloudscape", "\t\ttry {", "\t\t\tif (cloudscapeDriver != null)", "\t\t\t\tcloudscapeDriver.connect(\"jdbc:derby:;shutdown=true\", ", "\t\t\t\t\t\t\t\t\t\t (Properties) null);", "\t\t} catch (SQLException sqle) {", "\t\t\t// If we can't shutdown cloudscape. Perhaps authentication is", "\t\t\t// set to true or some other reason. We will just print a", "\t\t\t// message to the console and proceed.", "\t\t\tif (((EmbedSQLException)sqle).getMessageId() !=", "\t\t\t SQLState.CLOUDSCAPE_SYSTEM_SHUTDOWN)", "\t\t\t\tconsolePropertyMessage(\"DRDA_ShutdownWarning.I\",", "\t\t\t\t\t\t\t\t\t sqle.getMessage());", "\t\t}", "\t\t*/" ] } ] } ]
derby-DERBY-1275-1812ea86
Committing patch DERBY1275EnableClientTracingDiffV5.txt attached to DERBY-1275. This patch adds 2 JVM properties to enable client-side tracing. The properties are derby.client.traceLevel and derby.client.traceDirectory. More info can be found at http://wiki.apache.org/db-derby/UndocumentedDerbyBehavior git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@504317 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/LogWriter.java", "hunks": [ { "added": [ "import java.security.AccessController;", "" ], "header": "@@ -24,10 +24,12 @@ package org.apache.derby.client.am;", "removed": [] } ] }, { "file": "java/client/org/apache/derby/jdbc/ClientBaseDataSource.java", "hunks": [ { "added": [ "import java.security.AccessController;" ], "header": "@@ -23,16 +23,14 @@ package org.apache.derby.jdbc;", "removed": [ "import java.util.Enumeration;", "import java.lang.reflect.Field;", "import javax.naming.RefAddr;" ] }, { "added": [ " /**", " * Check if derby.client.traceDirectory is provided as a JVM property. ", " * If yes, then we use that value. If not, then we look for traceDirectory ", " * in the the properties parameter.", " *", " * @param properties jdbc url properties ", " * @return value of traceDirectory property", " */", " \tString traceDirectoryString;", " \ttraceDirectoryString = readSystemProperty(Attribute.CLIENT_JVM_PROPERTY_PREFIX+Attribute.CLIENT_TRACE_DIRECTORY);", "\t\tif (traceDirectoryString == null) ", "\t\t\treturn properties.getProperty(Attribute.CLIENT_TRACE_DIRECTORY);", "\t\telse", "\t\t\treturn traceDirectoryString;", " }", " ", " /**", " * Read the value of the passed system property.", " * @param key name of the system property", " * @return value of the system property", " */", " private static String readSystemProperty(final String key) {", " \t//Using an anonymous class to read the system privilege because the", " \t//method java.security.AccessController.doPrivileged requires an ", " \t//instance of a class(which implements java.security.PrivilegedAction). ", " \t//Since readSystemProperty method is static, we can't simply pass \"this\" ", " \t//to doPrivileged method and have ClientBaseDataSource implement ", " \t//PrivilegedAction. To get around the static nature of method ", " \t//readSystemProperty, have an anonymous class implement PrivilegeAction.", " \t//This class will read the system property in it's run method and", " \t//return the value to the caller.", " \treturn (String )AccessController.doPrivileged", " \t (new java.security.PrivilegedAction(){", " \t\t public Object run(){", " \t\t\treturn System.getProperty(key);", " \t\t }", " \t }", " \t );" ], "header": "@@ -341,8 +339,45 @@ public abstract class ClientBaseDataSource implements Serializable, Referenceabl", "removed": [ " return properties.getProperty(Attribute.CLIENT_TRACE_DIRECTORY);" ] }, { "added": [ " /**", " * Check if derby.client.traceLevel is provided as a JVM property. ", " * If yes, then we use that value. If not, then we look for traceLevel ", " * in the the properties parameter.", " *", " * @param properties jdbc url properties ", " * @return value of traceLevel property", " */", " \tString traceLevelString;", " \ttraceLevelString = readSystemProperty(Attribute.CLIENT_JVM_PROPERTY_PREFIX+Attribute.CLIENT_TRACE_LEVEL);", "\t\tif (traceLevelString == null) ", "\t\t\ttraceLevelString = properties.getProperty(Attribute.CLIENT_TRACE_LEVEL);", "\t\treturn parseInt(traceLevelString, propertyDefault_traceLevel);" ], "header": "@@ -839,9 +874,20 @@ public abstract class ClientBaseDataSource implements Serializable, Referenceabl", "removed": [ " String traceLevelString = properties.getProperty(Attribute.CLIENT_TRACE_LEVEL);", " return parseInt(traceLevelString, propertyDefault_traceLevel);" ] } ] } ]
derby-DERBY-1276-7855f496
DERBY-1276 Calling ResultSet.isLast() on a scrollable insensitive resultset causes the entire ResultSet to be populated. Submitted by Andreas Korneliussen. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@409009 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/execute/ScrollInsensitiveResultSet.java", "hunks": [ { "added": [ "\t\t\tif (beforeFirst || afterLast || currentPosition==0 ||", "\t\t\t\tcurrentPosition<positionInSource)", "\t\t\t}\t\t\t", "\t\t\t", "\t\t\t/* If we have seen the last row, we can tell if we are ", "\t\t\t * on it by comparing currentPosition with lastPosition.", "\t\t\t * Otherwise, we check if there is a next row.", "\t\t\t\treturn (currentPosition == lastPosition);", "\t\t\t\tfinal int savePosition = currentPosition;", "\t\t\t\tfinal boolean retval = (getNextRowFromSource() == null);" ], "header": "@@ -739,31 +739,24 @@ public class ScrollInsensitiveResultSet extends NoPutResultSetImpl", "removed": [ "\t\t\tif (beforeFirst || afterLast)", "\t\t\t}", "", "\t\t\t/* If we've already seen the last row", "\t\t\t * then we can tell if we are on it by", "\t\t\t * the current position,", "\t\t\t * otherwise, we need to find the last", "\t\t\t * row in order to tell if the current row", "\t\t\t * is the last row.", "\t\t\t\treturn (currentPosition == lastPosition && currentPosition != 0);", "\t\t\t\tint savePosition = currentPosition;", "\t\t\t\tboolean retval = false;", "\t\t\t\tgetLastRow();", "\t\t\t\tif (savePosition == lastPosition && savePosition != 0)", "\t\t\t\t{", "\t\t\t\t\tretval = true;", "\t\t\t\t}" ] } ] } ]
derby-DERBY-1277-a03c87fa
DERBY-1277: Call to rs.isLast() may cause rs.getXXX() to return values from the last row instead of the current row in scrollable resultsets. In a scrollable updatable resultset, a call to rs.isLast() may cause rs.getXXX() to return values from the last row instead of from the current row. It is caused by TableScanResultSet and EmbedResultSet sharing DataValueColumn descriptors, together with the fact that a call to isLast() makes TableScanResultSet modify the data. Attaching a fix, where ScrollInsensitiveResultSet does not return ExecRow from the source resultset, only from the hashtable. This ensures that navigation in source resultsets does not affect the current row of the scrollinsensitive resultset. Also extended the test testRelative to test with concurrency mode CONCUR_UPDATABLE, and fixed it so that it can run in the derbynetclient framework. Patch contributed by Andreas Korneliussen. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@400212 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/execute/ScrollInsensitiveResultSet.java", "hunks": [ { "added": [ "\t\t\tif (result != null) {", "\t\t\t\tresult = getRowFromHashTable(row);", "\t\t\t}" ], "header": "@@ -345,6 +345,9 @@ public class ScrollInsensitiveResultSet extends NoPutResultSetImpl", "removed": [] }, { "added": [ "\t\t\tif (result !=null) {", "\t\t\t\tresult = getRowFromHashTable(currentPosition);", "\t\t\t}" ], "header": "@@ -515,6 +518,9 @@ public class ScrollInsensitiveResultSet extends NoPutResultSetImpl", "removed": [] } ] } ]
derby-DERBY-1282-7bac3b36
DERBY-1282: Dyre's derby-1282.v1.diff patch. Fills in new client info methods added by JDBC4. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@407617 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/LogicalConnection40.java", "hunks": [ { "added": [ " /**", " * <code>getClientInfo</code> forwards to", " * <code>physicalConnection_</code>.", " * <code>getClientInfo</code> always returns an empty", " * <code>Properties</code> object since Derby doesn't support", " * ClientInfoProperties.", " *", " * @return an empty <code>Properties</code> object", " * @exception SQLException if an error occurs", " */", "\tcheckForNullPhysicalConnection();", "\treturn physicalConnection_.getClientInfo();", " /**", " * <code>getClientInfo</code> forwards to", " * <code>physicalConnection_</code>. Always returns a <code>null", " * String</code> since Derby does not support", " * ClientInfoProperties.", " *", " * @param name a property key to get <code>String</code>", " * @return a property value <code>String</code>", " * @exception SQLException if an error occurs", " */", "\tcheckForNullPhysicalConnection();", "\treturn physicalConnection_.getClientInfo(name);" ], "header": "@@ -87,14 +87,36 @@ public class LogicalConnection40", "removed": [ " throw SQLExceptionFactory.notImplemented(\"getClientInfo()\");", " throw SQLExceptionFactory.notImplemented(\"getClientInfo(String)\");" ] } ] }, { "file": "java/client/org/apache/derby/client/net/NetConnection40.java", "hunks": [ { "added": [ "import java.util.Enumeration;", "import org.apache.derby.client.am.FailedProperties40;" ], "header": "@@ -37,9 +37,11 @@ import java.sql.Struct;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredConnection40.java", "hunks": [ { "added": [ "//import org.apache.derby.impl.jdbc.EmbedConnection40;" ], "header": "@@ -31,6 +31,7 @@ import java.sql.SQLXML;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedConnection40.java", "hunks": [ { "added": [ "import java.util.Enumeration;", "import org.apache.derby.iapi.error.StandardException;", "import org.apache.derby.iapi.jdbc.FailedProperties40;" ], "header": "@@ -33,8 +33,11 @@ import java.sql.Struct;", "removed": [] } ] } ]
derby-DERBY-129-75ec275f
DERBY-129: CAST should warn about truncation. Made subclasses of SQLChar and SQLBinary generate a DataTruncation warning when they are truncated to a shorter value. Pass warnings up to the statement from NoRowsResultSetImpl. Make the network server transfer DataTruncation in a way the client accepts. Update test canons to expect warnings on truncation. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1341046 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/drda/org/apache/derby/impl/drda/DRDAConnThread.java", "hunks": [ { "added": [ "\t\twriteSQLCAGRP(e, 0, 0);" ], "header": "@@ -2374,7 +2374,7 @@ class DRDAConnThread extends Thread {", "removed": [ "\t\twriteSQLCAGRP(e, getSqlCode(getExceptionSeverity(e)), 0, 0);" ] }, { "added": [ "\t\twriteSQLCAGRP(e, updateCount, rowCount);" ], "header": "@@ -5910,7 +5910,7 @@ class DRDAConnThread extends Thread {", "removed": [ "\t\twriteSQLCAGRP(e, getSqlCode(severity), updateCount, rowCount);" ] }, { "added": [ " private void writeSQLCAGRP(SQLException e, int updateCount, long rowCount)", " throws DRDAProtocolException", " int sqlcode = 0;", "" ], "header": "@@ -6040,13 +6040,14 @@ class DRDAConnThread extends Thread {", "removed": [ "\t * @param sqlcode\tsqlcode", "\tprivate void writeSQLCAGRP(SQLException e, int sqlcode, int updateCount,", "\t\t\tlong rowCount) throws DRDAProtocolException" ] }, { "added": [ " // SQLWarnings should have warning severity, except if it's a", " // DataTruncation warning for write operations (with SQLState 22001),", " // which is supposed to be used as an exception even though it's a", " // sub-class of SQLWarning.", " if (e instanceof SQLWarning &&", " !SQLState.LANG_STRING_TRUNCATION.equals(e.getSQLState())) {", " sqlcode = ExceptionSeverity.WARNING_SEVERITY;", " } else {", " // Get the SQLCODE for exceptions. Note that this call will always", " // return -1, so the real error code will be lost.", " sqlcode = getSqlCode(getExceptionSeverity(e));", " }", "" ], "header": "@@ -6054,6 +6055,19 @@ class DRDAConnThread extends Thread {", "removed": [] }, { "added": [ "\t\twriteSQLCAGRP(e, 0, 0);" ], "header": "@@ -6635,7 +6649,7 @@ class DRDAConnThread extends Thread {", "removed": [ "\t\twriteSQLCAGRP(e, getSqlCode(getExceptionSeverity(e)), 0, 0);" ] }, { "added": [ "\t\t\t\twriteSQLCAGRP(sqlw, 1, -1);" ], "header": "@@ -7123,7 +7137,7 @@ class DRDAConnThread extends Thread {", "removed": [ "\t\t\t\twriteSQLCAGRP(sqlw, sqlw.getErrorCode(), 1, -1);" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/services/io/CounterOutputStream.java", "hunks": [ { "added": [ " limit = -1;" ], "header": "@@ -42,7 +42,7 @@ public class CounterOutputStream extends OutputStream implements Limit {", "removed": [ "\t\tsuper();" ] }, { "added": [ "\t\tif (out != null) out.write(b);" ], "header": "@@ -101,25 +101,10 @@ public class CounterOutputStream extends OutputStream implements Limit {", "removed": [ "\t\tout.write(b);", "\t/**", "\t\tAdd b.length to the count.", "", "\t\t@see OutputStream#write", "\t*/", "\tpublic void write(byte b[]) throws IOException {", "\t\t", "\t\tif ((limit >= 0) && ((count + b.length) > limit)) {", "\t\t\tthrow new EOFException();", "\t\t}", "", "\t\tout.write(b);", "\t\tcount += b.length;", "\t}", "" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/types/SQLBinary.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.conn.StatementContext;", "", "import org.apache.derby.iapi.reference.ContextId;", "import org.apache.derby.iapi.services.context.ContextService;", "", "import org.apache.derby.iapi.services.io.InputStreamUtil;", "import java.sql.DataTruncation;" ], "header": "@@ -21,39 +21,37 @@", "removed": [ "import org.apache.derby.iapi.types.BitDataValue;", "import org.apache.derby.iapi.types.DataValueDescriptor;", "import org.apache.derby.iapi.types.ConcatableDataValue;", "import org.apache.derby.iapi.types.BooleanDataValue;", "import org.apache.derby.iapi.types.NumberDataValue;", "", "import 
org.apache.derby.iapi.types.SQLInteger;", "", "import org.apache.derby.iapi.services.io.InputStreamUtil;" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/types/SQLChar.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.services.io.FormatIdOutputStream;", "import org.apache.derby.iapi.reference.ContextId;" ], "header": "@@ -29,14 +29,9 @@ import org.apache.derby.iapi.services.io.Storable;", "removed": [ "import org.apache.derby.iapi.types.DataTypeDescriptor;", "import org.apache.derby.iapi.types.DataValueDescriptor;", "import org.apache.derby.iapi.types.TypeId;", "import org.apache.derby.iapi.types.StringDataValue;", "import org.apache.derby.iapi.types.NumberDataValue;", "import org.apache.derby.iapi.types.BooleanDataValue;", "import org.apache.derby.iapi.types.ConcatableDataValue;" ] }, { "added": [ "import org.apache.derby.iapi.services.io.CounterOutputStream;", "import org.apache.derby.iapi.sql.conn.StatementContext;" ], "header": "@@ -44,18 +39,15 @@ import org.apache.derby.iapi.jdbc.CharacterStreamDescriptor;", "removed": [ "import org.apache.derby.iapi.types.SQLInteger;", "import org.apache.derby.iapi.types.SQLDate;", "import org.apache.derby.iapi.types.SQLTime;", "import org.apache.derby.iapi.types.SQLTimestamp;", "" ] }, { "added": [ "import java.sql.DataTruncation;" ], "header": "@@ -64,6 +56,7 @@ import java.io.UTFDataFormatException;", "removed": [] }, { "added": [ "", " writeUTF(out, c);", " }", " }", "", " /**", " * Write a single character to a stream in the modified UTF-8 format.", " *", " * @param out the destination stream", " * @param c the character to write", " * @throws IOException if writing to the destination stream fails", " */", " private static void writeUTF(ObjectOutput out, int c) throws IOException {", " if ((c >= 0x0001) && (c <= 0x007F))", " {", " out.write(c);", " }", " else if (c > 0x07FF)", " {", " out.write(0xE0 | ((c >> 12) & 0x0F));", " out.write(0x80 | ((c >> 6) & 0x3F));", " out.write(0x80 | ((c >> 0) & 0x3F));", " }", " else", " {", " out.write(0xC0 | ((c >> 6) & 0x1F));", " out.write(0x80 | ((c >> 0) & 0x3F));" ], "header": "@@ -984,22 +977,33 @@ public class SQLChar", "removed": [ " ", " if ((c >= 0x0001) && (c <= 0x007F))", " {", " out.write(c);", " }", " else if (c > 0x07FF)", " {", " out.write(0xE0 | ((c >> 12) & 0x0F));", " out.write(0x80 | ((c >> 6) & 0x3F));", " out.write(0x80 | ((c >> 0) & 0x3F));", " }", " else", " {", " out.write(0xC0 | ((c >> 6) & 0x1F));", " out.write(0x80 | ((c >> 0) & 0x3F));", " }" ] }, { "added": [ " try {", " } catch (StandardException se) {", " if (errorOnTrunc) {", " throw se;", " }", "", " // Generate a truncation warning, as specified in SQL:2003,", " // part 2, 6.12 <cast specification>, general rules 10)c)2)", " // and 11)c)2).", "", " // Data size and transfer size need to be in bytes per", " // DataTruncation javadoc.", " String source = getString();", " int transferSize = getUTF8Length(source, 0, desiredWidth);", " int dataSize = transferSize +", " getUTF8Length(source, desiredWidth, source.length());", "", " DataTruncation warning = new DataTruncation(", " -1, // column index is unknown", " false, // parameter", " true, // read", " dataSize,", " transferSize);", "", " warning.initCause(se);", "", " StatementContext statementContext = (StatementContext)", " ContextService.getContext(ContextId.LANG_STATEMENT);", " statementContext.getActivation().", " getResultSet().addWarning(warning);", " }" ], "header": "@@ -1914,9 +1918,38 @@ readingLoop:", "removed": [ " if (errorOnTrunc)", " //RESOLVE: 
should issue a warning instead" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/BasicNoPutResultSetImpl.java", "hunks": [ { "added": [ "\tpublic final void addWarning(SQLWarning w) {" ], "header": "@@ -1018,7 +1018,7 @@ implements NoPutResultSet", "removed": [ "\tprotected final void addWarning(SQLWarning w) {" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/NoRowsResultSetImpl.java", "hunks": [ { "added": [ "import java.sql.SQLWarning;" ], "header": "@@ -21,15 +21,14 @@", "removed": [ "import org.apache.derby.iapi.services.monitor.Monitor;", "import org.apache.derby.iapi.services.stream.HeaderPrintWriter;" ] }, { "added": [], "header": "@@ -45,7 +44,6 @@ import org.apache.derby.iapi.sql.execute.NoPutResultSet;", "removed": [ "import org.apache.derby.iapi.types.DataTypeDescriptor;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/TemporaryRowHolderResultSet.java", "hunks": [ { "added": [ "import java.sql.SQLWarning;" ], "header": "@@ -21,6 +21,7 @@", "removed": [] } ] } ]
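A sketch of the behaviour the change introduces: a CAST that truncates character data now raises a java.sql.DataTruncation warning instead of silently chopping the value. Exactly where the warning chain surfaces (result set vs. statement) is not pinned down here, so the sketch checks both; the literal and target length are illustrative.

import java.sql.*;

public class CastTruncationDemo {
    public static void main(String[] args) throws Exception {
        Connection c = DriverManager.getConnection("jdbc:derby:demoDB;create=true");
        Statement s = c.createStatement();
        ResultSet rs = s.executeQuery("VALUES CAST('Hello Derby' AS CHAR(5))");
        rs.next();
        System.out.println(rs.getString(1));            // "Hello"
        SQLWarning w = rs.getWarnings();
        if (w == null) w = s.getWarnings();
        if (w instanceof DataTruncation) {
            DataTruncation dt = (DataTruncation) w;
            System.out.println("truncated: dataSize=" + dt.getDataSize()
                    + ", transferSize=" + dt.getTransferSize());
        }
        rs.close(); s.close(); c.close();
    }
}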
derby-DERBY-1292-63d4142e
DERBY-1292 1) The addition of a copy method to org.apache.derby.client.am.ColumnMetaData. 2) Modifications to org.apache.derby.client.am.PreparedStatement to hold on to a copy of the column meta data for each entry used in batch updates. 3) A test was added to org.apache.derbyTestingfunctionTests.tests.derbynet.prepStmt. Patch contributed by James F. Adams derby@xemaps.com git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@437822 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/PreparedStatement.java", "hunks": [ { "added": [ "import java.util.ArrayList;" ], "header": "@@ -27,6 +27,7 @@ import org.apache.derby.shared.common.reference.SQLState;", "removed": [] }, { "added": [ " ", " private ArrayList parameterTypeList;" ], "header": "@@ -62,6 +63,8 @@ public class PreparedStatement extends Statement", "removed": [] }, { "added": [ " parameterTypeList = null;" ], "header": "@@ -86,6 +89,7 @@ public class PreparedStatement extends Statement", "removed": [] }, { "added": [ " ", " if (parameterTypeList == null) {", " parameterTypeList = new ArrayList();", " }" ], "header": "@@ -1361,6 +1365,10 @@ public class PreparedStatement extends Statement", "removed": [] }, { "added": [ " ", " // Get a copy of the parameter type data and save it in a list", " // which will be used later on at the time of batch execution.", " parameterTypeList.add(parameterMetaData_.clientParamtertype_.clone());", " parameterTypeList.add(null);" ], "header": "@@ -1373,8 +1381,13 @@ public class PreparedStatement extends Statement", "removed": [] }, { "added": [ " parameterMetaData_.clientParamtertype_ = (int[]) parameterTypeList.get(i);" ], "header": "@@ -2027,6 +2040,7 @@ public class PreparedStatement extends Statement", "removed": [] } ] } ]
derby-DERBY-1295-9148a9ac
DERBY-1295 contributed by Fernanda Pizzorno. Scroll insensitive resultset should not implicitly close due to positioning in autocommit mode git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@412831 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedResultSet.java", "hunks": [ { "added": [ "\t\t // The ResultSet may implicitly close when when the ResultSet type ", "\t\t // is TYPE_FORWARD_ONLY and the next method of ResultSet returns ", "\t\t // false. This will cause a commit if autocommit = true." ], "header": "@@ -487,16 +487,9 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\t /*", "\t\t\t Connection.setAutoCommit says that a statement completes,", "\t\t\t and will autoCommit, when it fetches the last row or is closed.", "\t\t\t This means a close will get a \"Cursor already closed\" error.", "\t\t\t\tThis rule only applies when doing a next() - if it were applied", "\t\t\t\tto scrolling actions (like FIRST or LAST) it would close", "\t\t\t\tthe cursor when doing any operation on a scrolling cursor.", "", "\t\t\t if autocommit, this will commit", "\t\t */" ] } ] }, { "file": "java/testing/org/apache/derbyTesting/functionTests/util/SQLStateConstants.java", "hunks": [ { "added": [ " // The SQLState when calling next on a result set which is closed.", " public static final String RESULT_SET_IS_CLOSED = \"XCL16\";" ], "header": "@@ -336,6 +336,8 @@ public class SQLStateConstants", "removed": [] } ] } ]
derby-DERBY-1296-1c31b3a1
DERBY-1296 Setting property derby.system.bootAll causes NullPointerException in BaseMonitor.bootProviderServices. Fix and test contributed by Fernanda Pizzorno git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@425388 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/functionTests/harness/Sed.java", "hunks": [ { "added": [ " searchStrings.addElement(\"Directory.*.extinout/crwombatlog/log.*.exists\");" ], "header": "@@ -164,6 +164,7 @@ public class Sed", "removed": [] } ] }, { "file": "java/testing/org/apache/derbyTesting/functionTests/util/TestConfiguration.java", "hunks": [ { "added": [ " return jdbcClient.getUrlBase() + name;" ], "header": "@@ -127,7 +127,7 @@ public class TestConfiguration {", "removed": [ " return jdbcClient.getUrlBase() + dbName;" ] }, { "added": [ " return getConnection(getDatabaseName());", " }", " ", " /**", " * Get connection to a database.", " * If the database does not exist, it will be created.", " * A default username and password will be used for the connection.", " *", " * @param databaseName database to connect to", " *", " * @return connection to default database.", " */", " public Connection getConnection (String databaseName) throws SQLException {", " getJDBCUrl(databaseName) + \";create=true\",", " Properties attrs = ", " getDataSourcePropertiesForDatabase(databaseName);", " con = TestDataSourceFactory.getXADataSource(attrs).", " getXAConnection (getUserName(), ", " getUserPassword()).getConnection();", " Properties attrs = getDataSourcePropertiesForDatabase(databaseName);", " con = TestDataSourceFactory.getDataSource(attrs).getConnection();" ], "header": "@@ -216,23 +216,40 @@ public class TestConfiguration {", "removed": [ " getJDBCUrl() + \";create=true\",", " con = TestDataSourceFactory.getXADataSource().getXAConnection (getUserName(),", " getUserPassword()).getConnection(); ", " con = TestDataSourceFactory.getDataSource().getConnection();" ] }, { "added": [ " return getDataSourcePropertiesForDatabase(", " DERBY_TEST_CONFIG.getDatabaseName());", " }", " ", " /**", " * Generate properties which can be set on a", " * <code>DataSource</code> in order to connect to a given", " * database.", " *", " * @param databaseName database to connect to", " *", " * @return a <code>Properties</code> object containing server", " * name, port number, database name and other attributes needed to", " * connect to the database", " */", " public static Properties getDataSourcePropertiesForDatabase", " (String databaseName) ", " {", " attrs.setProperty(\"databaseName\", databaseName);", "" ], "header": "@@ -390,16 +407,34 @@ public class TestConfiguration {", "removed": [ " attrs.setProperty(\"databaseName\", DERBY_TEST_CONFIG.getDatabaseName());", " " ] } ] } ]
derby-DERBY-1296-36498c01
updated source file header for recently added file to comply with new policy. Initial source file also had incorrect copyright dates. It was added as part of DERBY-1296. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@425391 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1303-cccb382b
- DERBY-1303 The test derbynet/SuicideOfStreaming.java fails when programs are built as insane - Patch by Tomohito Nakayama (tomonaka@basil.ocn.ne.jp) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@406921 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1308-57efc3c3
DERBY-1308: Tests jdbcapi/HoldabilityTest.junit, jdbcapi/UpdateXXXTest.junit, jdbcapi/SURTest.junit fail on wctme5.7_foundation. Attaching patch DERBY-1308_3_20060527.*. In this patch, I moved the get*DataSource* methods from functionTests/util/BaseJDBCTestCase.java to a new file, functionTests/util/TestDataSourceFactory.java, and the method getDefaultSourceProperties into functionTests/util/TestConfiguration.java. I adjusted affected tests that I could find, and verified the affected tests where applicable with wctme5.7, wsdd5.6, wctme5.7_foundation, jdk142 and jdk16. Patch contributed by Myrna van Lunteren. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@410267 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/functionTests/util/TestDataSourceFactory.java", "hunks": [ { "added": [ "/*", "", " Derby - Class org.apache.derbyTesting.functionTests.util.TestUtil", "", " Copyright 2006 The Apache Software Foundation or its licensors, as applicable.", "", " Licensed under the Apache License, Version 2.0 (the \"License\");", " you may not use this file except in compliance with the License.", " You may obtain a copy of the License at", "", " http://www.apache.org/licenses/LICENSE-2.0", "", " Unless required by applicable law or agreed to in writing, software", " distributed under the License is distributed on an \"AS IS\" BASIS,", " WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", " See the License for the specific language governing permissions and", " limitations under the License.", "", " */", "", "", "package org.apache.derbyTesting.functionTests.util;", "", "import java.util.Properties;", "import javax.sql.DataSource;", "import javax.sql.ConnectionPoolDataSource;", "import javax.sql.XADataSource;", "", "/**", " * Utility class for JDBC JUnit tests.", " * Contains methods to obtain the various datasources.", " */", "", "public class TestDataSourceFactory {", "", " /**", " * Return a <code>DataSource</code> for the appropriate framework.", " *", " * @param attrs properties for the data source", " * @return a <code>DataSource</code> object", " * @see TestUtil#getDataSource(Properties)", " */", " public static DataSource getDataSource(Properties attrs) {", " return TestUtil.getDataSource(attrs);", " }", "", " /**", " * Return a <code>DataSource</code> which can establish a", " * connection to the default database.", " *", " * @return a <code>DataSource</code> object", " */", " public static DataSource getDataSource() {", " return getDataSource(TestConfiguration.getDefaultDataSourceProperties());", " }", "", " /**", " * Return a <code>ConnectionPoolDataSource</code> for the", " * appropriate framework.", " *", " * @param attrs properties for the data source", " * @return a <code>ConnectionPoolDataSource</code> object", " * @see TestUtil#getConnectionPoolDataSource(Properties)", " */", " public static ConnectionPoolDataSource", " getConnectionPoolDataSource(Properties attrs)", " {", " return TestUtil.getConnectionPoolDataSource(attrs);", " }", "", " /**", " * Return a <code>ConnectionPoolDataSource</code> which can", " * establish a connection to the default database.", " *", " * @return a <code>ConnectionPoolDataSource</code> object", " */", " public static ConnectionPoolDataSource getConnectionPoolDataSource() {", " return getConnectionPoolDataSource(TestConfiguration.getDefaultDataSourceProperties());", " }", "", " /**", " * Return an <code>XADataSource</code> for the appropriate", " * framework.", " *", " * @param attrs properties for the data source", " * @return an <code>XADataSource</code> object", " * @see TestUtil#getXADataSource(Properties)", " */", " public static XADataSource getXADataSource(Properties attrs) {", " return TestUtil.getXADataSource(attrs);", " }", "", " /**", " * Return an <code>XADataSource</code> which can establish a", " * connection to the default database.", " *", " * @return an <code>XADataSource</code> object", " */", " public static XADataSource getXADataSource() {", " return getXADataSource(TestConfiguration.getDefaultDataSourceProperties());", " }\t", "", "}", "" ], "header": "@@ -0,0 +1,104 @@", "removed": [] } ] } ]
derby-DERBY-1313-4e091b63
DERBY-1313: SUR: Use DRDA's extended diagnostic to send ROW_UPDATED and ROW_DELETED warnings. Submitted by Fernanda Pizzorno git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@411167 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/net/CodePoint.java", "hunks": [ { "added": [ " // SQL Error Diagnostic Level", " // DIAGLVL0 A null SQLDIAGGRP is returned. This is the default.", " // DIAGLVL1 A non-null SQLDIAGGRP should be returned.", " // DIAGLVL2 A non-null SQLDIAGGRP should be returned, and both SQLDCMSG", " // message text fields should be returned as null strings.", " static final byte DIAGLVL0 = (byte)0xF0;", " static final byte DIAGLVL1 = (byte)0xF1;", " static final byte DIAGLVL2 = (byte)0xF2;", "" ], "header": "@@ -153,6 +153,15 @@ public class CodePoint {", "removed": [] } ] }, { "file": "java/client/org/apache/derby/client/net/NetConnectionRequest.java", "hunks": [ { "added": [ " // This specifies the SQL Error Diagnostic Level", " buildDIAGLVL();", "" ], "header": "@@ -395,6 +395,9 @@ public class NetConnectionRequest extends Request implements ConnectionRequestIn", "removed": [] } ] }, { "file": "java/client/org/apache/derby/client/net/NetCursor.java", "hunks": [ { "added": [], "header": "@@ -33,8 +33,6 @@ import org.apache.derby.client.am.SqlCode;", "removed": [ "import org.apache.derby.shared.common.reference.SQLState;", "" ] }, { "added": [ " boolean receivedRowUpdatedWarning = false;", " NetSqlca[] netSqlca = this.parseSQLCARD(qrydscTypdef_);", " for (int i=0;i<netSqlca.length; i++) {", " int sqlcode = netSqlca[i].getSqlCode();", " if (sqlcode < 0) {", " throw new SqlException(netAgent_.logWriter_, ", " netSqlca[i]);", " } else {", " if (netResultSet_ != null && ", " netSqlca[i].containsSqlcax()) {", " netResultSet_.setRowCountEvent(", " netSqlca[i].getRowCount(", " qrydscTypdef_));", " }", " } else if (netResultSet_ != null && sqlcode > 0) {", " String sqlState = netSqlca[i].getSqlState();", " if (!sqlState.equals(SQLState.ROW_DELETED) && ", " !sqlState.equals(SQLState.ROW_UPDATED)) {", " netResultSet_.accumulateWarning(", " new SqlWarning(agent_.logWriter_, ", " netSqlca[i]));", " } else {", " receivedDeleteHoleWarning ", " |= sqlState.equals(SQLState.ROW_DELETED);", " receivedRowUpdatedWarning ", " |= sqlState.equals(SQLState.ROW_UPDATED);", " setIsUpdataDeleteHole(rowIndex, receivedDeleteHoleWarning);", " setIsRowUpdated(receivedRowUpdatedWarning);", " " ], "header": "@@ -138,49 +136,51 @@ public class NetCursor extends org.apache.derby.client.am.Cursor {", "removed": [ " NetSqlca netSqlca = this.parseSQLCARD(qrydscTypdef_);", "", " if (netResultSet_ != null && netResultSet_.scrollable_) {", " if (netSqlca != null && ", " netSqlca.getSqlState().equals(SQLState.ROW_DELETED)) {", " receivedDeleteHoleWarning = true;", " netSqlca = null;", " } else {", " setIsUpdataDeleteHole(rowIndex, false);", " }", " if (netSqlca != null && ", " netSqlca.getSqlState().equals(SQLState.ROW_UPDATED)) {", " setIsRowUpdated(true);", " netSqlca = null;", " } else {", " setIsRowUpdated(false);", " }", " }", " int sqlcode = netSqlca.getSqlCode();", " if (sqlcode < 0) {", " throw new SqlException(netAgent_.logWriter_, netSqlca);", " } else {", " if (sqlcode > 0) {", " if (netResultSet_ != null && netSqlca.containsSqlcax()) {", " netResultSet_.setRowCountEvent(netSqlca.getRowCount(qrydscTypdef_));", " } else if (netResultSet_ != null) {", " netResultSet_.accumulateWarning(new SqlWarning(agent_.logWriter_, netSqlca));" ] }, { "added": [ " // Reads 8-bytes from the dataBuffer from the current position.", " // If position is already at the end of the buffer, send CNTQRY to get more ", " // data.", " private long readFdocaLong() throws ", " 
org.apache.derby.client.am.DisconnectException, SqlException {", " if ((position_ + 8) > lastValidBytePosition_) {", " // Check for ENDQRYRM, throw SqlException if already received one.", " checkAndThrowReceivedEndqryrm();", "", " // Send CNTQRY to complete the row/rowset.", " int lastValidByteBeforeFetch = completeSplitRow();", "", " // if lastValidBytePosition_ has not changed, and an ENDQRYRM was ", " // received, throw a SqlException for the ENDQRYRM.", " checkAndThrowReceivedEndqryrm(lastValidByteBeforeFetch);", " }", "", " long i = SignedBinary.getLong(dataBuffer_, position_);", " position_ += 8;", " return i;", " }", " " ], "header": "@@ -426,6 +426,28 @@ public class NetCursor extends org.apache.derby.client.am.Cursor {", "removed": [] }, { "added": [ " NetSqlca[] parseSQLCARD(Typdef typdef) throws org.apache.derby.client.am.DisconnectException, SqlException {" ], "header": "@@ -672,7 +694,7 @@ public class NetCursor extends org.apache.derby.client.am.Cursor {", "removed": [ " NetSqlca parseSQLCARD(Typdef typdef) throws org.apache.derby.client.am.DisconnectException, SqlException {" ] }, { "added": [ " private NetSqlca[] parseSQLCAGRP(Typdef typdef) throws org.apache.derby.client.am.DisconnectException, SqlException {" ], "header": "@@ -691,7 +713,7 @@ public class NetCursor extends org.apache.derby.client.am.Cursor {", "removed": [ " private NetSqlca parseSQLCAGRP(Typdef typdef) throws org.apache.derby.client.am.DisconnectException, SqlException {" ] }, { "added": [ " NetSqlca[] sqlCa = parseSQLDIAGGRP();", " NetSqlca[] ret_val;", " if (sqlCa != null) {", " ret_val = new NetSqlca[sqlCa.length + 1];", " System.arraycopy(sqlCa, 0, ret_val, 1, sqlCa.length);", " } else {", " ret_val = new NetSqlca[1];", " }", " ret_val[0] = netSqlca;", " ", " return ret_val;" ], "header": "@@ -702,9 +724,18 @@ public class NetCursor extends org.apache.derby.client.am.Cursor {", "removed": [ " parseSQLDIAGGRP();", " return netSqlca;" ] } ] }, { "file": "java/drda/org/apache/derby/impl/drda/CodePoint.java", "hunks": [ { "added": [ "\t// SQL Error Diagnostic Level", "\tstatic final int DIAGLVL = 0x2160;", "" ], "header": "@@ -490,6 +490,9 @@ class CodePoint", "removed": [] } ] }, { "file": "java/drda/org/apache/derby/impl/drda/DRDAConnThread.java", "hunks": [ { "added": [ "\t// DRDA diagnostic level, DIAGLVL0 by default ", "\tprivate byte diagnosticLevel = (byte)0xF0; ", "" ], "header": "@@ -124,6 +124,9 @@ class DRDAConnThread extends Thread {", "removed": [] }, { "added": [ "\t\t\t\t// optional", "\t\t\t\tcase CodePoint.DIAGLVL:", "\t\t\t\t\tdiagnosticLevel = reader.readByte();", "\t\t\t\t\tbreak;" ], "header": "@@ -2934,6 +2937,10 @@ class DRDAConnThread extends Thread {", "removed": [] }, { "added": [ "\t\t// for now we only want to send ROW_DELETED and ROW_UPDATED warnings", "\t\t// as extended diagnostics", "\t\t// move to first ROW_DELETED or ROW_UPDATED exception. 
These have been", "\t\t// added to the end of the warning chain.", "\t\twhile (", "\t\t\t\tnextException != null && ", "\t\t\t\tnextException.getSQLState() != SQLState.ROW_UPDATED &&", "\t\t\t\tnextException.getSQLState() != SQLState.ROW_DELETED) {", "\t\t\tnextException = nextException.getNextException();", "\t\t}", "\t\tif ((nextException == null) || ", "\t\t\t\t(diagnosticLevel == CodePoint.DIAGLVL0)) {", "\t\twriter.writeByte(0); // SQLDIAGGRP indicator" ], "header": "@@ -5554,24 +5561,27 @@ class DRDAConnThread extends Thread {", "removed": [ "\t\twriter.writeByte(CodePoint.NULLDATA);", "\t\treturn;", "\t\t/**", "\t\t * TODO: Enable the following code when JCC can support SQLDIAGGRP", "\t\t * for all SQLCARD accesses. Commented out for now.", "\t\t */", "\t\t/*", "\t\tif (nextException == null)", "\t\t{", "\t\t*/" ] }, { "added": [ "", "\t\t\t// SQLCode > 0 -> Warning", "\t\t\t// SQLCode = 0 -> Info", "\t\t\t// SQLCode < 0 -> Error", "\t\t\tint severity = getExceptionSeverity(se);", "\t\t\tint sqlCode = -1;", "\t\t\tif (severity == CodePoint.SVRCOD_WARNING)", "\t\t\t\tsqlCode = 1;", "\t\t\telse if (severity == CodePoint.SVRCOD_INFO)", "\t\t\t\tsqlCode = 0;", "", "\t\t\tif (diagnosticLevel == CodePoint.DIAGLVL1) {", "\t\t\t\tsqlerrmc = se.getLocalizedMessage();", "\t\t\t}", "", "\t\t\t// only send arguments for diagnostic level 0", "\t\t\tif (diagnosticLevel == CodePoint.DIAGLVL0) {", "\t\t\t\t// we are only able to get arguments of EmbedSQLException", "\t\t\t\tif (se instanceof EmbedSQLException) {", "\t\t\t\t\tObject[] args = ((EmbedSQLException)se).getArguments();", "\t\t\t\t\tfor (int i = 0; args != null && i < args.length; i++)", "\t\t\t\t\t\tsqlerrmc += args[i].toString() + SQLERRMC_TOKEN_DELIMITER;", "\t\t\t\t}", "\t\t\t}" ], "header": "@@ -5601,13 +5611,32 @@ class DRDAConnThread extends Thread {", "removed": [ "\t\t\tint sqlCode = getSqlCode(getExceptionSeverity(se));", "\t\t\t\t", "\t\t\tObject[] args = ((EmbedSQLException)se).getArguments();", "\t\t\tfor (int i = 0; args != null && i < args.length; i++)", "\t\t\t\tsqlerrmc += args[i].toString() + SQLERRMC_TOKEN_DELIMITER;" ] }, { "added": [], "header": "@@ -5618,7 +5647,6 @@ class DRDAConnThread extends Thread {", "removed": [ "\t\twriter.writeByte(CodePoint.NULLDATA);" ] }, { "added": [ "\t\twriter.writeShort(i);" ], "header": "@@ -5648,7 +5676,7 @@ class DRDAConnThread extends Thread {", "removed": [ "\t\twriter.writeInt(i);" ] } ] } ]
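A sketch of what the extended diagnostics make visible on the client: delete holes in a scrollable updatable result set reported through the standard rowDeleted()/rowUpdated() calls. The table, its contents, and the assumption of a Network Server on localhost:1527 are illustrative.

import java.sql.*;

public class HoleDetectionDemo {
    public static void main(String[] args) throws Exception {
        Connection c = DriverManager.getConnection(
                "jdbc:derby://localhost:1527/demoDB;create=true");
        c.createStatement().executeUpdate("CREATE TABLE t2(id INT)");
        c.createStatement().executeUpdate("INSERT INTO t2 VALUES 1, 2, 3");
        Statement s = c.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
                                        ResultSet.CONCUR_UPDATABLE);
        ResultSet rs = s.executeQuery("SELECT id FROM t2");
        rs.next();
        rs.deleteRow();        // leaves a delete hole at row 1
        rs.first();            // reposition on the hole
        System.out.println("row 1 deleted? " + rs.rowDeleted());
        rs.close(); s.close(); c.close();
    }
}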
derby-DERBY-1314-73d678d3
DERBY-1314: Differences between client and embedded when invoking stored procedures using Statement.executeUpdate(). Make executeUpdate() return 0 on the client when executing a stored procedure. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@416696 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/PreparedStatement.java", "hunks": [ { "added": [ " private int executeUpdateX() throws SqlException {", " checkExecuteUpdatePostConditions(\"java.sql.PreparedStatement\");" ], "header": "@@ -386,13 +386,9 @@ public class PreparedStatement extends Statement", "removed": [ " // also used by Blob", " int executeUpdateX() throws SqlException {", "", " if (sqlMode_ == isUpdate__) {", " super.checkExecuteUpdatePostConditions(\"java.sql.PreparedStatement\");", " }" ] } ] } ]
derby-DERBY-1314-fade7e97
DERBY-501: Client and embedded drivers differ on invoking a procedure that returns a single Dynamic resultSet using CallableStatement.executeQuery(). This patch modifies EmbedStatement.processDynamicResults() so that it returns the number of dynamic results instead of a boolean. EmbedStatement.executeStatement() uses this number to decide whether an exception is to be raised. With this change, the executeQuery and executeUpdate parameters are no longer needed in GenericPreparedStatement.execute(). ProcedureTest.junit is now enabled in derbyall (all frameworks). Seven of the test cases run in the embedded framework only, but I expect all of them to succeed with the client driver after DERBY-1314 and DERBY-1364 have been fixed. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@414795 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/sql/PreparedStatement.java", "hunks": [ { "added": [], "header": "@@ -101,8 +101,6 @@ public interface PreparedStatement", "removed": [ "\t * @param executeQuery\t\tWhether or not called from a Statement.executeQuery()", "\t * @param executeUpdate\tWhether or not called from a Statement.executeUpdate()" ] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedResultSet.java", "hunks": [ { "added": [ " ps.execute(act, true, 0L); " ], "header": "@@ -3485,7 +3485,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ " ps.execute(act, false, true, true, 0L); " ] }, { "added": [ " // Execute the update where current of sql.", " org.apache.derby.iapi.sql.ResultSet rs = ps.execute(act, true, 0L);" ], "header": "@@ -3556,7 +3556,8 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ " org.apache.derby.iapi.sql.ResultSet rs = ps.execute(act, false, true, true, 0L); //execute the update where current of sql" ] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedStatement.java", "hunks": [ { "added": [ "", " // The statement returns rows, so calling it with", " // executeUpdate() is not allowed.", " if (executeUpdate) {", " throw StandardException.newException(", " SQLState.LANG_INVALID_CALL_TO_EXECUTE_UPDATE);", " }", "" ], "header": "@@ -1179,14 +1179,20 @@ public class EmbedStatement extends ConnectionChild", "removed": [ " executeQuery,", " executeUpdate," ] }, { "added": [ " int dynamicResultCount = 0;", " dynamicResultCount =", " processDynamicResults(a.getDynamicResults(),", " a.getMaxDynamicResults());", "", " // executeQuery() is not allowed if the statement", " // doesn't return exactly one ResultSet.", " if (executeQuery && dynamicResultCount != 1) {", " throw StandardException.newException(", " SQLState.LANG_INVALID_CALL_TO_EXECUTE_QUERY);", " }", "", " // executeUpdate() is not allowed if the statement", " // returns ResultSets.", " if (executeUpdate && dynamicResultCount > 0) {", " throw StandardException.newException(", " SQLState.LANG_INVALID_CALL_TO_EXECUTE_UPDATE);", " }", " if (dynamicResultCount == 0) {" ], "header": "@@ -1217,12 +1223,28 @@ public class EmbedStatement extends ConnectionChild", "removed": [ "\t\t\t\t\tboolean haveDynamicResults = false;", "\t\t\t\t\t\thaveDynamicResults = processDynamicResults(a.getDynamicResults(), a.getMaxDynamicResults());", "\t\t\t\t\tif (!haveDynamicResults) {" ] }, { "added": [ " retval = (dynamicResultCount > 0);" ], "header": "@@ -1240,7 +1262,7 @@ public class EmbedStatement extends ConnectionChild", "removed": [ "\t\t\t\t\tretval = haveDynamicResults;" ] }, { "added": [ "", " /**", " * Go through a holder of dynamic result sets, remove those that", " * should not be returned, and sort the result sets according to", " * their creation.", " *", " * @param holder a holder of dynamic result sets", " * @param maxDynamicResultSets the maximum number of result sets", " * to be returned", " * @return the actual number of result sets", " * @exception SQLException if an error occurs", " */", " private int processDynamicResults(java.sql.ResultSet[][] holder,", " int maxDynamicResultSets)", " throws SQLException", " {" ], "header": "@@ -1446,7 +1468,22 @@ public class EmbedStatement extends ConnectionChild", "removed": [ "\tprivate boolean processDynamicResults(java.sql.ResultSet[][] holder, int maxDynamicResultSets) throws SQLException {" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/GenericPreparedStatement.java", 
"hunks": [ { "added": [ "\t\treturn execute(a, rollbackParentContext, timeoutMillis);" ], "header": "@@ -237,15 +237,13 @@ public class GenericPreparedStatement", "removed": [ "\t\treturn execute(a, false, false, rollbackParentContext, timeoutMillis);", "\t * @param\texecuteQuery\t\t\t\tCalled via executeQuery", "\t * @param\texecuteUpdate\t\t\t\tCalled via executeUpdate" ] }, { "added": [], "header": "@@ -256,8 +254,6 @@ public class GenericPreparedStatement", "removed": [ " boolean executeQuery,", " boolean executeUpdate," ] } ] } ]
derby-DERBY-1315-03eae1d7
DERBY-1315 (minor cleanup) Remove a couple of unused fields and associated methods to save runtime space. Reduce the scope of some fields and methods to better understand their use. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@430173 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/FromTable.java", "hunks": [ { "added": [ "abstract class FromTable extends ResultSetNode implements Optimizable" ], "header": "@@ -65,7 +65,7 @@ import java.util.HashMap;", "removed": [ "public abstract class FromTable extends ResultSetNode implements Optimizable" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/SetOperatorNode.java", "hunks": [ { "added": [ "abstract class SetOperatorNode extends TableOperatorNode" ], "header": "@@ -52,7 +52,7 @@ import java.util.HashMap;", "removed": [ "public abstract class SetOperatorNode extends TableOperatorNode" ] }, { "added": [ "\tprivate PredicateList leftOptPredicates;", "\tprivate PredicateList rightOptPredicates;", "\tprivate PredicateList pushedPredicates;", "\tprivate HashMap leftScopedPreds;", "\tprivate HashMap rightScopedPreds;" ], "header": "@@ -63,17 +63,17 @@ public abstract class SetOperatorNode extends TableOperatorNode", "removed": [ "\tPredicateList leftOptPredicates;", "\tPredicateList rightOptPredicates;", "\tPredicateList pushedPredicates;", "\tHashMap leftScopedPreds;", "\tHashMap rightScopedPreds;" ] }, { "added": [ "\tPredicateList getLeftOptPredicateList()" ], "header": "@@ -1014,7 +1014,7 @@ public abstract class SetOperatorNode extends TableOperatorNode", "removed": [ "\tprotected PredicateList getLeftOptPredicateList()" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/TableOperatorNode.java", "hunks": [ { "added": [ "abstract class TableOperatorNode extends FromTable" ], "header": "@@ -58,9 +58,8 @@ import java.util.Properties;", "removed": [ "public abstract class TableOperatorNode extends FromTable", "\tboolean\t\t\tnestedInParens;" ] }, { "added": [ "\t\t\treturn \"nestedInParens: \" + false + \"\\n\" +" ], "header": "@@ -209,7 +208,7 @@ public abstract class TableOperatorNode extends FromTable", "removed": [ "\t\t\treturn \"nestedInParens: \" + nestedInParens + \"\\n\" +" ] }, { "added": [ "\t * The resulting state of this cal was never used so its", "\t * field was removed to save runtimespace for this node.", "\t * Further cleanup can be done including parser changes", "\t * if this call is really nor required." ], "header": "@@ -324,24 +323,15 @@ public abstract class TableOperatorNode extends FromTable", "removed": [ "\t\tthis.nestedInParens = nestedInParens;", "\t}", "", "\t/**", "\t * Return whether or not the table operator for this node was", "\t * nested in parens in the query. (Useful to parser", "\t * since some trees get created left deep and others right deep.)", "\t *", "\t * @return boolean\t\tWhether or not this node was nested in parens.", "\t */", "\tpublic boolean getNestedInParens()", "\t{", "\t\treturn nestedInParens;" ] } ] } ]
derby-DERBY-1315-8aff1cda
DERBY-766 DERBY-1714 Working method in CodeChunk that splits expressions out of generated methods that are too large. Bumps the number of unions supported in largeCodeGen to over 6,000 from around 800. Also increases the number of rows supported in a VALUES clause. A large number of UNION clauses still requires a large amount of memory for optimization (see DERBY-1315). A large number of rows in a VALUES clause fails at some point due to a StackOverflow. A subsequent commit will modify largeCodeGen to be a JUnit test and adapt to these changes, but I am running into issues finding useful working limits that can produce repeatable results without hitting memory issues. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@432856 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/services/bytecode/BCMethod.java", "hunks": [ { "added": [ " " ], "header": "@@ -74,7 +74,7 @@ class BCMethod implements MethodBuilder {", "removed": [ "" ] } ] }, { "file": "java/engine/org/apache/derby/impl/services/bytecode/CodeChunk.java", "hunks": [ { "added": [ " ", " int splitMinLength = splitMinLength(mb);", " " ], "header": "@@ -1285,6 +1285,9 @@ final class CodeChunk {", "removed": [] }, { "added": [ " // worth it.", " if (possibleSplitLength <= splitMinLength)" ], "header": "@@ -1374,11 +1377,8 @@ final class CodeChunk {", "removed": [ " // worth it. 100 is an arbitary number,", " // a real low limit would be the number of", " // bytes of instructions required to call", " // the sub-method, four I think.", " if (possibleSplitLength < 100)" ] }, { "added": [ " final int splitExpressionOut(final BCMethod mb, final ClassHolder ch,", " final int maxStack)", " String bestSplitRT = null; ", " ", " int splitMinLength = splitMinLength(mb);" ], "header": "@@ -1718,14 +1718,16 @@ final class CodeChunk {", "removed": [ " final int splitExpressionOut(BCMethod mb, ClassHolder ch,", " int maxStack)", " String bestSplitRT = null; " ] }, { "added": [ " ", " // TODO: This conditional handling was copied", " // from splitZeroStack, haven't looked in detail", " // to see how a conditional should be handled", " // with an expression split. So for the time", " // being just bail.", " if (true)", " return -1;", "" ], "header": "@@ -1781,6 +1783,15 @@ final class CodeChunk {", "removed": [] }, { "added": [ " // no plan to split here though, as we are only", " // splitting methods that return a reference.", " selfContainedBlockStart = -1;", " // earliestIndepPC[stack + 1];" ], "header": "@@ -1920,8 +1931,10 @@ final class CodeChunk {", "removed": [ " selfContainedBlockStart =", " earliestIndepPC[stack + 1];" ] }, { "added": [ " // no plan to split here though, as we are only", " // splitting methods that return a reference.", " selfContainedBlockStart = -1;", " ", " // top two words depend on the objectref", " // which was at the same depth of the first word", " // of the 64 bit value.", " earliestIndepPC[stack] =", " if (blockLength <= splitMinLength)", " // No point splitting, too small", " }", " else if (blockLength > (VMOpcode.MAX_CODE_LENGTH - 1))", " {", " // too big to split into a single method", " // (one for the return opcode)", " }", " else", " {", " // Only split for a method that returns", " // an class reference.", " int me = vmDescriptor.lastIndexOf(')');", " ", " if (vmDescriptor.charAt(me+1) == 'L')", " String rt = vmDescriptor.substring(me + 2,", " vmDescriptor.length() - 1);", " ", " // convert to external format.", " rt = rt.replace('/', '.');", " ", " if (blockLength >= optimalMinLength)", " {", " // Split now!", " BCMethod subMethod = startSubMethod(mb,", " rt, selfContainedBlockStart,", " blockLength);", " ", " return splitCodeIntoSubMethod(mb, ch, subMethod,", " selfContainedBlockStart, blockLength); ", " } ", " else if (blockLength > bestSplitBlockLength)", " {", " // Save it, may split at this point", " // if nothing better seen.", " bestSplitPC = selfContainedBlockStart;", " bestSplitBlockLength = blockLength;", " bestSplitRT = rt;", " }" ], "header": "@@ -1933,47 +1946,62 @@ final class CodeChunk {", "removed": [ " selfContainedBlockStart = earliestIndepPC[stack] =", "", " // Only split for a method that returns", " // an class reference.", " int me = vmDescriptor.lastIndexOf(')');", " if (vmDescriptor.charAt(me+1) == 'L')", " 
String rt = vmDescriptor.substring(me + 2,", " vmDescriptor.length() - 1);", " ", " if (blockLength > (VMOpcode.MAX_CODE_LENGTH - 1))", " {", " // too big to split into a single method", " // (one for the return opcode)", " } ", " else if (blockLength >= optimalMinLength)", " {", " // Split now!", " System.out.println(\"NOW \" + blockLength", " + \" @ \" + selfContainedBlockStart);", " BCMethod subMethod = startSubMethod(mb,", " rt, selfContainedBlockStart,", " blockLength);", "", " return splitCodeIntoSubMethod(mb, ch, subMethod,", " selfContainedBlockStart, blockLength); ", " } ", " else if (blockLength > bestSplitBlockLength)", " // Save it, may split at this point", " // if nothing better seen. ", " bestSplitPC = selfContainedBlockStart;", " bestSplitBlockLength = blockLength;", " bestSplitRT = rt;" ] }, { "added": [ "", " if (bestSplitBlockLength != -1) {", " ", " bestSplitPC, bestSplitBlockLength); ", " " ], "header": "@@ -1983,19 +2011,16 @@ final class CodeChunk {", "removed": [ " if (bestSplitBlockLength > 100)", " {", " System.out.println(\"BEST \" + bestSplitBlockLength", " + \" @ \" + bestSplitPC);", "", " bestSplitBlockLength, bestSplitBlockLength); ", " ", " " ] }, { "added": [ " /**", " * Minimum split length for a sub-method. If the number of", " * instructions to call the sub-method exceeds the length", " * of the sub-method, then there's no point splitting.", " * The number of bytes in the code stream to call", " * a generated sub-method can take is based upon the number of method args.", " * A method can have maximum of 255 words of arguments (section 4.10 JVM spec)", " * which in the worst case would be 254 (one-word) parameters", " * and this. For a sub-method the arguments will come from the", " * parameters to the method, i.e. ALOAD, ILOAD etc.", " * <BR>", " * This leads to this number of instructions.", " * <UL>", " * <LI> 4 - 'this' and first 3 parameters have single byte instructions", " * <LI> (N-4)*2 - Remaining parameters have two byte instructions", " * <LI> 3 for the invoke instruction.", " * </UL>", " */", " private static int splitMinLength(BCMethod mb) {", " int min = 1 + 3; // For ALOAD_0 (this) and invoke instruction", " ", " if (mb.parameters != null) {", " int paramCount = mb.parameters.length;", " ", " min += paramCount;", " ", " if (paramCount > 3)", " min += (paramCount - 3);", " }", " ", " return min;", " }" ], "header": "@@ -2020,6 +2045,38 @@ final class CodeChunk {", "removed": [] } ] } ]
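A sketch of the kind of statement the splitting work targets: a mechanically generated query with a UNION branch count above the old ceiling of roughly 800. The branch count, dummy table and database are illustrative; compiling such a statement is still memory-hungry, as the message notes.

import java.sql.*;

public class LargeUnionDemo {
    public static void main(String[] args) throws Exception {
        Connection c = DriverManager.getConnection("jdbc:derby:demoDB;create=true");
        StringBuilder sql = new StringBuilder("SELECT 1 FROM SYSIBM.SYSDUMMY1");
        for (int i = 2; i <= 1000; i++) {
            sql.append(" UNION ALL SELECT ").append(i).append(" FROM SYSIBM.SYSDUMMY1");
        }
        Statement s = c.createStatement();
        ResultSet rs = s.executeQuery(sql.toString());
        int rows = 0;
        while (rs.next()) rows++;
        System.out.println(rows + " rows");   // 1000
        rs.close(); s.close(); c.close();
    }
}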
derby-DERBY-1315-acdff3cd
DERBY-1315 This patch adds a small amount of logic to remove entries from an Optimizable's "best plan" HashMap when they are no longer needed. For more on when this is possible, see the discussion here: http://article.gmane.org/gmane.comp.apache.db.derby.devel/26051 Patch contributed by "A B" qozinx@gmail.com git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@439083 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/FromTable.java", "hunks": [ { "added": [ "\t Set of object->trulyTheBestAccessPath mappings used to keep track", "\t with respect to a specific outer query or ancestor node. In the case", "\t of an outer query, the object key will be an instance of OptimizerImpl.", "\t In the case of an ancestor node, the object key will be that node itself.", "\t Each ancestor node or outer query could potentially have a different", "\t idea of what this Optimizable's \"best access path\" is, so we have to", "\t keep track of them all.", "\tprivate HashMap bestPlanMap;", "", "\t/** Operations that can be performed on bestPlanMap. */", "\tprotected static final short REMOVE_PLAN = 0;", "\tprotected static final short ADD_PLAN = 1;", "\tprotected static final short LOAD_PLAN = 2;" ], "header": "@@ -102,14 +102,21 @@ abstract class FromTable extends ResultSetNode implements Optimizable", "removed": [ "\t Set of optimizer->trulyTheBestAccessPath mappings used to keep track", "\t with respect to a specific outer query; the outer query is represented", "\t by an instance of Optimizer. Each outer query could potentially have", "\t a different idea of what this Optimizable's \"best access path\" is, so", "\t we have to keep track of them all.", "\tprivate HashMap optimizerToBestPlanMap;" ] }, { "added": [ "\t\tbestPlanMap = null;" ], "header": "@@ -122,7 +129,7 @@ abstract class FromTable extends ResultSetNode implements Optimizable", "removed": [ "\t\toptimizerToBestPlanMap = null;" ] }, { "added": [ "\t\tupdateBestPlanMap(ADD_PLAN, this);" ], "header": "@@ -157,7 +164,7 @@ abstract class FromTable extends ResultSetNode implements Optimizable", "removed": [ "\t\taddOrLoadBestPlanMapping(true, this);" ] }, { "added": [ "\t/** @see Optimizable#updateBestPlanMap */", "\tpublic void updateBestPlanMap(short action,", "\t\tif (action == REMOVE_PLAN)", "\t\t{", "\t\t\tif (bestPlanMap != null)", "\t\t\t{", "\t\t\t\tbestPlanMap.remove(planKey);", "\t\t\t\tif (bestPlanMap.size() == 0)", "\t\t\t\t\tbestPlanMap = null;", "\t\t\t}", "", "\t\t\treturn;", "\t\t}", "", "\t\tif (action == ADD_PLAN)", "\t\t\t// If the bestPlanMap already exists, search for an", "\t\t\tif (bestPlanMap == null)", "\t\t\t\tbestPlanMap = new HashMap();", "\t\t\t\tap = (AccessPathImpl)bestPlanMap.get(planKey);" ], "header": "@@ -507,25 +514,37 @@ abstract class FromTable extends ResultSetNode implements Optimizable", "removed": [ "\t/** @see Optimizable#addOrLoadBestPlanMapping */", "\tpublic void addOrLoadBestPlanMapping(boolean doAdd,", "\t\tif (doAdd)", "\t\t\t// If the optimizerToBestPlanMap already exists, search for an", "\t\t\tif (optimizerToBestPlanMap == null)", "\t\t\t\toptimizerToBestPlanMap = new HashMap();", "\t\t\t\tap = (AccessPathImpl)optimizerToBestPlanMap.get(planKey);" ] }, { "added": [ "\t\t\tbestPlanMap.put(planKey, ap);" ], "header": "@@ -540,7 +559,7 @@ abstract class FromTable extends ResultSetNode implements Optimizable", "removed": [ "\t\t\toptimizerToBestPlanMap.put(planKey, ap);" ] }, { "added": [ "\t\tif (bestPlanMap == null)", "\t\tap = (AccessPathImpl)bestPlanMap.get(planKey);" ], "header": "@@ -550,10 +569,10 @@ abstract class FromTable extends ResultSetNode implements Optimizable", "removed": [ "\t\tif (optimizerToBestPlanMap == null)", "\t\tap = (AccessPathImpl)optimizerToBestPlanMap.get(planKey);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/OptimizerImpl.java", "hunks": [ { "added": [ "\t\t\tendOfRoundCleanup();" ], "header": "@@ -366,6 
+366,7 @@ public class OptimizerImpl implements Optimizer", "removed": [] }, { "added": [ "\t\t\t\t\tpullMe.updateBestPlanMap(FromTable.LOAD_PLAN, this);" ], "header": "@@ -980,7 +981,7 @@ public class OptimizerImpl implements Optimizer", "removed": [ "\t\t\t\t\tpullMe.addOrLoadBestPlanMapping(false, this);" ] }, { "added": [ "", "\t\t\t\t\tendOfRoundCleanup();" ], "header": "@@ -1133,7 +1134,9 @@ public class OptimizerImpl implements Optimizer", "removed": [] }, { "added": [ "\t\tendOfRoundCleanup();" ], "header": "@@ -1170,6 +1173,7 @@ public class OptimizerImpl implements Optimizer", "removed": [] }, { "added": [ "\t\t\t\tpullMe.updateBestPlanMap(FromTable.LOAD_PLAN, this);" ], "header": "@@ -1183,7 +1187,7 @@ public class OptimizerImpl implements Optimizer", "removed": [ "\t\t\t\tpullMe.addOrLoadBestPlanMapping(false, this);" ] }, { "added": [ "\t/**", "\t * Do any work that needs to be done after the current round", "\t * of optimization has completed. For now this just means walking", "\t * the subtrees for each optimizable and removing the \"bestPlan\"", "\t * that we saved (w.r.t to this OptimizerImpl) from all of the", "\t * nodes. If we don't do this post-optimization cleanup we", "\t * can end up consuming a huge amount of memory for deeply-", "\t * nested queries, which can lead to OOM errors. DERBY-1315.", "\t */", "\tprivate void endOfRoundCleanup()", "\t\tthrows StandardException", "\t{", "\t\tfor (int i = 0; i < numOptimizables; i++)", "\t\t{", "\t\t\toptimizableList.getOptimizable(i).", "\t\t\t\tupdateBestPlanMap(FromTable.REMOVE_PLAN, this);", "\t\t}", "\t}", "" ], "header": "@@ -1192,6 +1196,25 @@ public class OptimizerImpl implements Optimizer", "removed": [] }, { "added": [ "\t\t\t\tcurOpt.updateBestPlanMap(FromTable.LOAD_PLAN, curOpt);" ], "header": "@@ -1309,7 +1332,7 @@ public class OptimizerImpl implements Optimizer", "removed": [ "\t\t\t\tcurOpt.addOrLoadBestPlanMapping(false, curOpt);" ] }, { "added": [ "\t\t\t\tcurOpt.updateBestPlanMap(FromTable.LOAD_PLAN, curOpt);", "\t\t/* If we needed to revert plans for curOpt, we just did it above.", "\t\t * So we no longer need to keep the previous best plan--and in fact,", "\t\t * keeping it can lead to extreme memory usage for very large", "\t\t * queries. So delete the stored plan for curOpt. DERBY-1315.", "\t\t */", "\t\tcurOpt.updateBestPlanMap(FromTable.REMOVE_PLAN, curOpt);", "" ], "header": "@@ -1319,10 +1342,17 @@ public class OptimizerImpl implements Optimizer", "removed": [ "\t\t\t\tcurOpt.addOrLoadBestPlanMapping(false, curOpt);" ] }, { "added": [ "\t * Process (i.e. add, load, or remove) current best join order as the", "\t * best one for some outer query or ancestor node, represented by another", "\t * OptimizerImpl or an instance of FromTable, respectively. Then", "\t * to do the same. See Optimizable.updateBestPlan() for more on why", "\t * this is necessary.", "\t * @param action Indicates whether to add, load, or remove the plan", "\tprotected void updateBestPlanMaps(short action,", "\t\t// First we process this OptimizerImpl's best join order. 
If there's", "\t\t\tif (action == FromTable.REMOVE_PLAN)", "\t\t\t{", "\t\t\t\tif (savedJoinOrders != null)", "\t\t\t\t{", "\t\t\t\t\tsavedJoinOrders.remove(planKey);", "\t\t\t\t\tif (savedJoinOrders.size() == 0)", "\t\t\t\t\t\tsavedJoinOrders = null;", "\t\t\t\t}", "\t\t\t}", "\t\t\telse if (action == FromTable.ADD_PLAN)" ], "header": "@@ -2373,30 +2403,39 @@ public class OptimizerImpl implements Optimizer", "removed": [ "\t * Remember the current best join order as the best one for", "\t * some outer query, represented by another OptimizerImpl. Then", "\t * to remember its best plan with respect to the outer query.", "\t * See Optimizable.addOrLoadBestPlan() for more on why this is", "\t * necessary.", "\t * @param doAdd True if we're adding a mapping, false if we're loading.", "\tprotected void addOrLoadBestPlanMappings(boolean doAdd,", "\t\t// First we save this OptimizerImpl's best join order. If there's", "\t\t\tif (doAdd)" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/SingleChildResultSetNode.java", "hunks": [ { "added": [ "\t * @see Optimizable#updateBestPlanMap", "\t * Makes a call to add/load/remove a plan mapping for this node,", "\t * child, in order to ensure that we've handled the full plan", "\t * all the way down this node's subtree.", "\tpublic void updateBestPlanMap(short action,", "\t\tsuper.updateBestPlanMap(action, planKey);" ], "header": "@@ -165,16 +165,17 @@ abstract class SingleChildResultSetNode extends FromTable", "removed": [ "\t * @see Optimizable#addOrLoadBestPlanMapping", "\t * Makes a call to add/load the plan mapping for this node,", "\t * child, in order to ensure that we have a full plan mapped.", "\tpublic void addOrLoadBestPlanMapping(boolean doAdd,", "\t\tsuper.addOrLoadBestPlanMapping(doAdd, planKey);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/TableOperatorNode.java", "hunks": [ { "added": [ "\t * @see Optimizable#updateBestPlanMap", "\t * Makes a call to add/load/remove the plan mapping for this node,", "\t * left and right child, in order to ensure that we've handled", "\t * the full plan all the way down this node's subtree. ", "\tpublic void updateBestPlanMap(short action,", "\t\tsuper.updateBestPlanMap(action, planKey);" ], "header": "@@ -156,17 +156,17 @@ abstract class TableOperatorNode extends FromTable", "removed": [ "\t * @see Optimizable#addOrLoadBestPlanMapping", "\t * Makes a call to add/load the plan mapping for this node,", "\t * left and right child, in order to ensure that we have a", "\t * full plan mapped.", "\tpublic void addOrLoadBestPlanMapping(boolean doAdd,", "\t\tsuper.addOrLoadBestPlanMapping(doAdd, planKey);" ] }, { "added": [ "\t\t\t\tupdateBestPlanMap(action, planKey);", "\t\t\t\tupdateBestPlanMaps(action, planKey);", "\t\t\t\tupdateBestPlanMap(action, planKey);", "\t\t\t\tupdateBestPlanMaps(action, planKey);" ], "header": "@@ -177,23 +177,23 @@ abstract class TableOperatorNode extends FromTable", "removed": [ "\t\t\t\taddOrLoadBestPlanMapping(doAdd, planKey);", "\t\t\t\taddOrLoadBestPlanMappings(doAdd, planKey);", "\t\t\t\taddOrLoadBestPlanMapping(doAdd, planKey);", "\t\t\t\taddOrLoadBestPlanMappings(doAdd, planKey);" ] } ] } ]
derby-DERBY-1315-b1397ecd
DERBY-766 DERBY-1714 Convert largeCodeGen to a JUnit test, add it to the lang._Suite and add that to the derbylang.runall old harness suite. Added tests for inserting a large number of rows with a VALUES clause. Test needs further improvements due to errors from DERBY-1315 and stack overflow with a large INSERT VALUES clause. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@433085 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/junit/JDBC.java", "hunks": [ { "added": [ "\t * Provides simple testing of the ResultSet when the contents" ], "header": "@@ -299,7 +299,7 @@ public class JDBC {", "removed": [ "\t * Provides simple testing of the ResultSet when then contents" ] }, { "added": [ " ", " /**", " * Assert a SQL state is the expected value.", " * @param expected Expected SQLState.", " * @param sqle SQLException caught", " */", " public static void assertSQLState(String expected, SQLException sqle)", " {", " Assert.assertEquals(\"Unexpected SQL State\", expected, sqle.getSQLState());", " }" ], "header": "@@ -318,6 +318,16 @@ public class JDBC {", "removed": [] } ] } ]
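For illustration only (not part of the commit above): a JUnit-style sketch of how the assertSQLState helper added to JDBC.java might be used. The connection variable conn, the query, and the expected state 22012 (division by zero) are assumptions made for this example.

    // Assumes a junit.framework.TestCase subclass holding an open java.sql.Connection "conn".
    public void testDivideByZeroSQLState() throws Exception {
        java.sql.Statement s = conn.createStatement();
        try {
            java.sql.ResultSet rs = s.executeQuery("SELECT 1/0 FROM SYSIBM.SYSDUMMY1");
            rs.next(); // force evaluation of the select list
            fail("statement should have raised a division-by-zero error");
        } catch (java.sql.SQLException sqle) {
            // Fails the test unless the caught exception carries the expected SQL state.
            org.apache.derbyTesting.junit.JDBC.assertSQLState("22012", sqle);
        } finally {
            s.close();
        }
    }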
derby-DERBY-1315-df5ffc85
DERBY-1315 (minor cleanup) Make optimizerToBestPlanMap private scope and change its description into a javadoc comment. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@429893 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/FromTable.java", "hunks": [ { "added": [ "\t/**", "\t Set of optimizer->trulyTheBestAccessPath mappings used to keep track", "\t of which of this Optimizable's \"trulyTheBestAccessPath\" was the best", "\t with respect to a specific outer query; the outer query is represented", "\t by an instance of Optimizer. Each outer query could potentially have", "\t a different idea of what this Optimizable's \"best access path\" is, so", "\t we have to keep track of them all.", "\t*/", "\tprivate HashMap optimizerToBestPlanMap;" ], "header": "@@ -101,13 +101,15 @@ public abstract class FromTable extends ResultSetNode implements Optimizable", "removed": [ "\t// Set of optimizer->trulyTheBestAccessPath mappings used to keep track", "\t// of which of this Optimizable's \"trulyTheBestAccessPath\" was the best", "\t// with respect to a specific outer query; the outer query is represented", "\t// by an instance of Optimizer. Each outer query could potentially have", "\t// a different idea of what this Optimizable's \"best access path\" is, so", "\t// we have to keep track of them all.", "\tHashMap optimizerToBestPlanMap;" ] } ] } ]
derby-DERBY-1322-10b9cb1b
DERBY-1322: Missing resets of isOnInsertRow state in net client when navigating away via methods other than ResultSet#next. Submitted by Fernanda Pizzorno git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@409170 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/ResultSet.java", "hunks": [ { "added": [ " moveToCurrentRowX();" ], "header": "@@ -281,10 +281,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " if (isOnInsertRow_) {", " isOnInsertRow_ = false;", " isOnCurrentRow_ = true;", " }" ] }, { "added": [ " moveToCurrentRowX();", "" ], "header": "@@ -2111,6 +2108,8 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [] }, { "added": [ " ", " moveToCurrentRowX();" ], "header": "@@ -2148,6 +2147,8 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [] }, { "added": [ " ", " moveToCurrentRowX();" ], "header": "@@ -2188,6 +2189,8 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [] }, { "added": [ " ", " moveToCurrentRowX();" ], "header": "@@ -2241,6 +2244,8 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [] }, { "added": [ " moveToCurrentRowX();", "" ], "header": "@@ -2357,6 +2362,8 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [] }, { "added": [ " ", " moveToCurrentRowX();", " " ], "header": "@@ -2439,6 +2446,9 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [] }, { "added": [], "header": "@@ -2446,12 +2456,6 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " // this method may not be called when the cursor on the insert row", " if (isOnInsertRow_) {", " throw new SqlException(agent_.logWriter_, ", " new ClientMessageId(SQLState.CURSOR_INVALID_OPERATION_AT_CURRENT_POSITION));", " }", "" ] }, { "added": [ " ", " moveToCurrentRowX();" ], "header": "@@ -2569,6 +2573,8 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [] }, { "added": [ " moveToCurrentRowX();" ], "header": "@@ -3741,17 +3747,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " if (!isOnInsertRow_) {", " // no affect", " } else {", " resetUpdatedColumns();", " isOnInsertRow_ = false;", " isOnCurrentRow_ = true;", " if (currentRowInRowset_ > 0) {", " updateColumnInfoFromCache();", " }", " isValidCursorPosition_ = true;", " }" ] } ] } ]
derby-DERBY-1323-6b0118a9
DERBY-1323 Detectability methods rowUpdated, rowInserted, rowDeleted can be called from illegal states in both clients. Submitted by Dag H. Wanvik git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@408875 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/ResultSet.java", "hunks": [ { "added": [ " checkPositionedOnPlainRow();" ], "header": "@@ -2725,6 +2725,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [] }, { "added": [ " checkPositionedOnPlainRow();", "", " boolean rowInserted = false;", "", " // Not implemented for any result set type,", " // so it always returns false.", "" ], "header": "@@ -2742,8 +2743,14 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " boolean rowInserted = false;" ] }, { "added": [ " checkPositionedOnPlainRow();", " boolean rowDeleted = ", " (resultSetType_ == ResultSet.TYPE_SCROLL_INSENSITIVE) ?", " cursor_.getIsUpdateDeleteHole() : false;" ], "header": "@@ -2759,10 +2766,11 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " boolean rowDeleted = (resultSetType_ == ResultSet.TYPE_SCROLL_INSENSITIVE) ?", "\t\tcursor_.getIsUpdateDeleteHole() :", "\t\tfalse;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedResultSet.java", "hunks": [ { "added": [ "\t\tcheckNotOnInsertRow();", "\t\tcheckOnRow();", "" ], "header": "@@ -2155,6 +2155,9 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "\t\tcheckNotOnInsertRow();", "\t\tcheckOnRow();", "" ], "header": "@@ -2182,6 +2185,9 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] } ] } ]
derby-DERBY-1325-fe8bb68e
DERBY-1325 Isolation level of local connection does not get reset after exiting a global transaction if the isolation level was changed using SQL. Attaching a patch 'derby-1325-v1.diff' which ensures correct isolation level gets used once we switch back to local mode when SQL is used to set the isolation level. Patch does the following: * Adds call to get the isolation level up to date when joining/resuming a global transaction. This will make sure the BrokeredConnection object has the correct isolation level to be used when we switch back to local mode. * Adds a test to jdbcapi/checkDataSource.java and modifies the master files. With this patch, I ran derbyall with Sun jdk 1.4.2 on Windows XP. No failures. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@409002 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1326-25d6720b
DERBY-1219: jdbcapi/checkDataSource test hangs intermittently with client. This patch contributed by Deepa Remesh (dremesh@gmail.com). This patch enables the checkDataSource and checkDataSource30 tests to run with the client framework by removing the code which shuts down the system in the middle of the test. This is the code which causes the intermediate hang. The hanging problem has been logged as a separate issue, DERBY-1326. The shutdown is a valuable part of the test because it verifies that the global transaction state is valid even after the database has been shut down, so once the hang problem has been resolved, this test should be modified again to re-enable the shutdown processing with the client framework. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@406776 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1326-37c3287b
DERBY-1326 (partial) Network server may abandon sessions when Derby system is shutdown and this causes intermittent hangs in the client. Two small cleanups: Make NetworkServerControlImpl.startNetworkServer() remove the sessions it closes from sessionTable. Also, synchronize on runQueue to prevent modifications while the queue is traversed. Invoke closeSession() and close() in a finally clause in DRDAConnThread.handleException() to ensure proper closing of the session when an unexpected error happens. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@446538 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/drda/org/apache/derby/impl/drda/NetworkServerControlImpl.java", "hunks": [ { "added": [ "\t\t\t\t\t\tsynchronized (runQueue) {", "\t\t\t\t\t\t\tfor (int i = 0; i < runQueue.size(); i++) {", "\t\t\t\t\t\t\t\tSession s = (Session) runQueue.get(i);", "\t\t\t\t\t\t\t\ts.close();", "\t\t\t\t\t\t\t\tremoveFromSessionTable(s.getConnNum());", "\t\t\t\t\t\t\t}", "\t\t\t\t\t\t\trunQueue.clear();", "\t\t\t\t\t\t}" ], "header": "@@ -743,9 +743,14 @@ public final class NetworkServerControlImpl {", "removed": [ "\t\t\t\t\t\tfor (int i = 0; i < runQueue.size(); i++)", "\t\t\t\t\t\t\t((Session)runQueue.get(i)).close();", "\t\t\t\t\t\trunQueue.clear();" ] } ] } ]
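A reduced sketch of the locking idiom the cleanup above relies on (class and field names here are illustrative, not Derby's): both the thread that queues sessions and the thread that drains them must synchronize on the same queue object, otherwise the size()/get() traversal can race with a concurrent add.

    import java.util.ArrayList;
    import java.util.List;

    class SessionQueue {
        interface Session {
            void close();
        }

        private final List<Session> runQueue = new ArrayList<Session>();

        // Called by the thread that accepts new client sessions.
        void enqueue(Session session) {
            synchronized (runQueue) {
                runQueue.add(session);
            }
        }

        // Called at shutdown: close every queued session and leave the queue empty,
        // holding the queue's monitor so a concurrent enqueue() cannot interleave.
        void drain() {
            synchronized (runQueue) {
                for (int i = 0; i < runQueue.size(); i++) {
                    runQueue.get(i).close();
                }
                runQueue.clear();
            }
        }
    }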
derby-DERBY-1326-4b086587
DERBY-1326: Network server may abandon sessions when Derby system is shutdown and this causes intermittent hangs in the client. Re-enable shutdown in checkDataSource test. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@447462 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1326-57fd882d
DERBY-1326: Network server may abandon sessions when Derby system is shutdown and this causes intermittent hangs in the client. Added JUnit test case which reliably reproduces the hang. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@449616 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-1326-d702ebab
DERBY-1326 (partial) Network server may abandon sessions when Derby system is shutdown and this causes intermittent hangs in the client. Don't poison the network server's worker threads when an engine shutdown is detected. Poisoning the threads could lead to abandoning of sessions and hangs in the client. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@447375 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/drda/org/apache/derby/impl/drda/NetworkServerControlImpl.java", "hunks": [ { "added": [ "\t\t\t\t\t\t// DERBY-1326: There could be active threads that", "\t\t\t\t\t\t// contain old/invalid sessions. These sessions won't", "\t\t\t\t\t\t// be cleaned up until there is some activity on", "\t\t\t\t\t\t// them. We could optimize this by going through", "\t\t\t\t\t\t// sessionTable and closing the sessions' socket", "\t\t\t\t\t\t// streams." ], "header": "@@ -752,11 +752,12 @@ public final class NetworkServerControlImpl {", "removed": [ "\t\t\t\t\t\t// Close and remove DRDAConnThreads on threadList.", "\t\t\t\t\t\tfor (int i = 0; i < threadList.size(); i++)", "\t\t\t\t\t\t\t((DRDAConnThread)threadList.get(i)).close();", "\t\t\t\t\t\tthreadList.clear();", "\t\t\t\t\t\tfreeThreads = 0;" ] } ] } ]
derby-DERBY-1327-318307e7
DERBY-1327 Identity column can be created with wrong and very large start with value with "J2RE 1.5.0 IBM Windows 32 build pwi32dev-20060412 (SR2)" with JIT on. The fix for this issue would be to reduce the number of parameters required by the constructor to <=10 in org.apache.derby.iapi.sql.dictionary.ColumnDescriptor class. While researching into this, I found that all the 3 constructors in the class have a parameter named autoinc and it is defined as a boolean. This parameter is always equal to (parameter named autoincInc != 0). In my patch (Derby1327WrongStartKeyPatch1CodelineTrunk.txt) which is attached to this JIRA, I have removed the autoinc parameter and inside the constructors, I use (parameter named autoincInc != 0) instead of relying on autoinc. This cleans up the constructor parameter passing for all the 3 constructors and also brings down the number of parameters to <=10. The test program from the JIRA entry runs fine with this change and I have created a new test JitTest.java based on that test program. Hopefully this test can be a placeholder for any future JIT issues. I also ran the test suites and there were no new failures. I also removed the import of org.apache.derby.iapi.sql.dictionary.ColumnDescriptor from some classes which didn't really use ColumnDescriptor. Contributed by Mamta Satoor git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@413129 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/sql/dictionary/ColumnDescriptor.java", "hunks": [ { "added": [], "header": "@@ -95,7 +95,6 @@ public class ColumnDescriptor extends TupleDescriptor", "removed": [ "\t * @param autoinc\t\tboolean value for sanity checking." ] }, { "added": [ "\t\t\t\t\t UUID defaultUUID, long autoincStart, long autoincInc, ", "\t\t\t\tautoincInc);\t\t\t\t" ], "header": "@@ -105,12 +104,12 @@ public class ColumnDescriptor extends TupleDescriptor", "removed": [ "\t\t\t\t\t UUID defaultUUID, long autoincStart, long autoincInc, boolean autoinc,", "\t\t\t\tautoincInc, autoinc);\t\t\t\t" ] }, { "added": [ "\t\t\t\t\t\t UUID defaultUUID, long autoincStart, long autoincInc)" ], "header": "@@ -130,14 +129,13 @@ public class ColumnDescriptor extends TupleDescriptor", "removed": [ "\t\t * @param autoinc\t\tboolean value for sanity checking.", "\t\t\t\t\t\t UUID defaultUUID, long autoincStart, long autoincInc, boolean autoinc)" ] }, { "added": [ "\t\tassertAutoinc(autoincInc != 0," ], "header": "@@ -151,7 +149,7 @@ public class ColumnDescriptor extends TupleDescriptor", "removed": [ "\t\tassertAutoinc(autoinc," ] }, { "added": [ " long autoincStart, long autoincInc)" ], "header": "@@ -178,14 +176,13 @@ public class ColumnDescriptor extends TupleDescriptor", "removed": [ "\t * @param autoinc\t\tBoolean value, for sanity checking.", " long autoincStart, long autoincInc, boolean autoinc)" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/catalog/DataDictionaryImpl.java", "hunks": [ { "added": [ " (UUID) null, 0, 0);" ], "header": "@@ -3078,7 +3078,7 @@ public final class\tDataDictionaryImpl", "removed": [ " (UUID) null, 0, 0, false);" ] }, { "added": [ "\t\t\t\t\t\t\t\t\t\t 0, 0);" ], "header": "@@ -3262,7 +3262,7 @@ public final class\tDataDictionaryImpl", "removed": [ "\t\t\t\t\t\t\t\t\t\t 0, 0, false);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/catalog/SYSCOLUMNSRowFactory.java", "hunks": [ { "added": [], "header": "@@ -503,11 +503,6 @@ public class SYSCOLUMNSRowFactory extends CatalogRowFactory", "removed": [ "\t\t/* NOTE: We use the autoincColumn variable in order to work around ", "\t\t * a 1.3.0 HotSpot bug. 
(#4361550)", "\t\t */", "\t\tboolean autoincColumn = (autoincInc != 0); ", "" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/AlterTableConstantAction.java", "hunks": [ { "added": [ "\t\t\t\t\t\t\t\t\t\t\t\t columnInfo[ix].autoincInc" ], "header": "@@ -623,8 +623,7 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction", "removed": [ "\t\t\t\t\t\t\t\t\t\t\t\t columnInfo[ix].autoincInc,", "\t\t\t\t\t\t\t\t\t\t\t\t columnInfo[ix].autoincInc != 0" ] }, { "added": [ "\t\t\t\t\t\t\t\t columnInfo[ix].autoincInc" ], "header": "@@ -923,8 +922,7 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction", "removed": [ "\t\t\t\t\t\t\t\t columnInfo[ix].autoincInc,", "\t\t\t\t\t\t\t\t columnInfo[ix].autoincInc != 0" ] }, { "added": [ "\t\t\t\t\t\t\t\t\tcolumnDescriptor.getAutoincInc());" ], "header": "@@ -966,8 +964,7 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction", "removed": [ "\t\t\t\t\t\t\t\t\tcolumnDescriptor.getAutoincInc(),", "\t\t\t\t\t\t\t\t\tcolumnDescriptor.getAutoincInc() != 0);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/CreateTableConstantAction.java", "hunks": [ { "added": [ "\t\t\tif (columnInfo[ix].autoincInc != 0)//dealing with autoinc column" ], "header": "@@ -256,6 +256,7 @@ class CreateTableConstantAction extends DDLConstantAction", "removed": [] } ] } ]
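A much simplified sketch of the shape of the fix (the real ColumnDescriptor constructors take far more arguments): rather than passing a boolean that must always equal (autoincInc != 0), the flag is derived where it is needed, which removes one constructor parameter and one way for callers to pass inconsistent values.

    // Illustrative only; not the real org.apache.derby.iapi.sql.dictionary.ColumnDescriptor.
    class ColumnSketch {
        private final long autoincStart;
        private final long autoincInc;

        ColumnSketch(long autoincStart, long autoincInc) {
            this.autoincStart = autoincStart;
            this.autoincInc = autoincInc;
        }

        // Derived instead of passed in: a non-zero increment marks an identity column.
        boolean isAutoincrement() {
            return autoincInc != 0;
        }
    }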
derby-DERBY-1329-fdd31675
DERBY-1329: Set ColumnReference in CurrentOfNode when a match is found. Attaching a patch to address this issue. In a word, the problem is that the ColumnReference in a CurrentOfNode can, in certain situations, end up with a tableNumber that is never set, and hence it defaults to -1. The fix I've made ensures that the ColumnReference's tableNumber will always be set when necessary--i.e. when we've found the ResultColumn that matches the received ColumnReference. I think this is the correct fix for two reasons: 1) In FromList.bindColumnReferences(), there is the following comment: /* TableNumbers are set in the CR in the underlying * FromTable. This ensures that they get the table * number from the underlying table, not the join node. * This is important for beging able to push predicates * down through join nodes. */ The place where "TableNumbers are set" is in the getMatchingColumn() call, which means that the underlying FromTable (which includes CurrentOfNode) is responsible for setting the table number. 2) Inspection of all other FromTables that implement getMatchingColumn() shows that they all set the ColumnReference's table number if the corresponding ResultColumn is found. The one exception is JoinNode, but the getMatchingColumn() method in JoinNode in turn calls the method of the same name on the join's left and right nodes, so we know that, eventually, the ColumnReference's tableNumber will get set by one of the other FromTable's getMatchingColumn() calls. So the only FromTable that does not set the tableNumber is CurrentOfNode, and that's the reason for the failure described in this issue. The change seems fairly minor but if anyone has a chance to double-check it, that'd be great. I also added a test case (using the repro posted in the above comments) to lang/update.sql. I ran derbyall on Linux Red Hat (RHEL4) using ibm142 and saw no new failures. Submitted by Army Brown (gozinx@gmail.com) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@411393 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/CurrentOfNode.java", "hunks": [ { "added": [ "\t\t\t\t// If we found the ResultColumn, set the ColumnReference's", "\t\t\t\t// table number accordingly. Note: we used to only set", "\t\t\t\t// the tableNumber for correlated references (as part of", "\t\t\t\t// changes for DERBY-171) but inspection of code (esp.", "\t\t\t\t// the comments in FromList.bindColumnReferences() and", "\t\t\t\t// the getMatchingColumn() methods on other FromTables)", "\t\t\t\t// suggests that we should always set the table number", "\t\t\t\t// if we've found the ResultColumn. So we do that here.", "\t\t\t\tcolumnReference.setTableNumber( tableNumber );", "" ], "header": "@@ -347,6 +347,16 @@ public final class CurrentOfNode extends FromTable {", "removed": [] } ] } ]
derby-DERBY-1338-5ad71053
DERBY-1338: Client tests fail with NoClassDefFound: DRDAProtocolExceptionInfo. Patch contributed by Dag Wanvik (dag.wanvik@sun.com). Work around a classloader bug involving interrupt handling during class loading. If the first request to load the DRDAProtocolExceptionInfo class occurs during shutdown, the loading of the class may be aborted when the Network Server calls Thread.interrupt() on the DRDAConnThread. By including a static reference to the DRDAProtocolExceptionInfo class here, we ensure that it is loaded as soon as the DRDAConnThread class is loaded, and therefore we know we won't be trying to load the class during shutdown. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@416012 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/drda/org/apache/derby/impl/drda/DRDAConnThread.java", "hunks": [ { "added": [ " // Work around a classloader bug involving interrupt handling during", " // class loading. If the first request to load the", " // DRDAProtocolExceptionInfo class occurs during shutdown, the", " // loading of the class may be aborted when the Network Server calls", " // Thread.interrupt() on the DRDAConnThread. By including a static", " // reference to the DRDAProtocolExceptionInfo class here, we ensure", " // that it is loaded as soon as the DRDAConnThread class is loaded,", " // and therefore we know we won't be trying to load the class during", " // shutdown. See DERBY-1338 for more background, including pointers", " // to the apparent classloader bug in the JVM.", "\tprivate static final DRDAProtocolExceptionInfo dummy =", "\t\tnew DRDAProtocolExceptionInfo(0,0,0,false);", "" ], "header": "@@ -173,6 +173,19 @@ class DRDAConnThread extends Thread {", "removed": [] } ] } ]
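The fix above relies on a general JVM idiom, shown here as a standalone sketch with made-up class names: a static field whose initializer constructs an instance forces the referenced class to be loaded and initialized as soon as the enclosing class is, instead of lazily at first use.

    class Worker {
        // Loading Worker also loads and initializes ErrorInfo, so a later
        // Thread.interrupt() cannot arrive in the middle of ErrorInfo's lazy,
        // first-use class loading.
        private static final ErrorInfo EAGERLY_LOADED = new ErrorInfo(0, 0, 0, false);
    }

    class ErrorInfo {
        ErrorInfo(int a, int b, int c, boolean flag) {
        }
    }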
derby-DERBY-1340-ceb72100
DERBY-1340: Anurag's derby-1340.diff, which reverts the refactoring of the client api committed as part of DERBY-1246. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@409007 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/jdbc/ClientConnectionPoolDataSource40.java", "hunks": [ { "added": [ "import java.sql.QueryObjectFactory;", "import org.apache.derby.client.am.ClientMessageId;", "import org.apache.derby.client.am.SqlException;", "import org.apache.derby.shared.common.reference.SQLState;" ], "header": "@@ -21,10 +21,12 @@", "removed": [ "import javax.sql.ConnectionPoolDataSource;", "import org.apache.derby.client.am.SQLExceptionFactory;" ] } ] }, { "file": "java/client/org/apache/derby/jdbc/ClientXADataSource40.java", "hunks": [ { "added": [ "import java.sql.QueryObjectFactory;", "import org.apache.derby.client.am.ClientMessageId;", "import org.apache.derby.shared.common.reference.SQLState;" ], "header": "@@ -21,14 +21,15 @@", "removed": [ "import javax.sql.DataSource;", "import org.apache.derby.client.am.SQLExceptionFactory;" ] }, { "added": [ " }", " ", " /**", " * This method forwards all the calls to default query object provided by ", " * the jdk.", " * @param ifc interface to generated concreate class", " * @return concreat class generated by default qury object generator", " */", " public <T extends BaseQuery> T createQueryObject(Class<T> ifc) ", " throws SQLException {", " return QueryObjectFactory.createDefaultQueryObject (ifc, this);", " } ", " ", " /**", " * Returns false unless <code>interfaces</code> is implemented ", " * ", " * @param interfaces a Class defining an interface.", " * @return true if this implements the interface or ", " * directly or indirectly wraps an object ", " * that does.", " * @throws java.sql.SQLException if an error occurs while determining ", " * whether this is a wrapper for an object ", " * with the given interface.", " */", " public boolean isWrapperFor(Class<?> interfaces) throws SQLException {", " return interfaces.isInstance(this);", " }", " ", " /**", " * Returns <code>this</code> if this class implements the interface", " *", " * @param interfaces a Class defining an interface", " * @return an object that implements the interface", " * @throws java.sql.SQLExption if no object if found that implements the ", " * interface", " */", " public <T> T unwrap(java.lang.Class<T> interfaces)", " throws SQLException {", " try { ", " return interfaces.cast(this);", " } catch (ClassCastException cce) {", " throw new SqlException(null,new ClientMessageId(", " SQLState.UNABLE_TO_UNWRAP), interfaces).getSQLException();", " }", " }" ], "header": "@@ -78,5 +79,49 @@ public class ClientXADataSource40 extends ClientXADataSource {", "removed": [ " } " ] } ] } ]
derby-DERBY-1343-14299573
DERBY-2397 (refactor) Move drop code for ConglomerateDescriptor into ConglomerateDescriptor.drop(). Add various comments from information gained while refactoring code and minor cleanup. One more dropping of a ConglomerateDescriptor needs to be modified to use the drop() method but requires some cleanup for DERBY-1343 git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@518343 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/sql/dictionary/ConglomerateDescriptor.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;", "import org.apache.derby.iapi.sql.depend.DependencyManager;", "import org.apache.derby.iapi.error.StandardException;", "import org.apache.derby.iapi.store.access.TransactionController;" ], "header": "@@ -21,12 +21,16 @@", "removed": [] }, { "added": [ " * ", " * A ConglomerateDescriptor can map to a base table, an index", " * or a index backing a constraint. Multiple ConglomerateDescriptors", " * can map to a single underlying store conglomerate, such as when", " * multiple index definitions share a physical file.", " * ", " * " ], "header": "@@ -37,12 +41,19 @@ import org.apache.derby.iapi.services.monitor.Monitor;", "removed": [] }, { "added": [ "\tprivate transient String[]\tcolumnNames;" ], "header": "@@ -53,7 +64,7 @@ public final class ConglomerateDescriptor extends TupleDescriptor", "removed": [ "\tprivate String[]\tcolumnNames;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/CreateIndexNode.java", "hunks": [ { "added": [], "header": "@@ -244,7 +244,6 @@ public class CreateIndexNode extends DDLStatementNode", "removed": [ " long \t\t\t\t\tconglomId = 0;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/CreateConstraintConstantAction.java", "hunks": [ { "added": [ "\t * <P>", "\t * A constraint is represented as:", "\t * <UL>", "\t * <LI> ConstraintDescriptor.", "\t * </UL>", "\t * If a backing index is required then the index will", "\t * be created through an CreateIndexConstantAction setup", "\t * by the compiler.", "\t * <BR>", "\t * Dependencies are created as:", "\t * <UL>", "\t * <LI> ConstraintDescriptor depends on all the providers collected", " * at compile time and passed into the constructor.", "\t * <LI> For a FOREIGN KEY constraint ConstraintDescriptor depends", " * on the ConstraintDescriptor for the referenced constraints", " * and the privileges required to create the constraint.", "\t * </UL>", "", "\t * @see ConstraintDescriptor", "\t * @see CreateIndexConstantAction" ], "header": "@@ -127,7 +127,26 @@ public class CreateConstraintConstantAction extends ConstraintConstantAction", "removed": [ "\t *" ] }, { "added": [ " " ], "header": "@@ -214,7 +233,7 @@ public class CreateConstraintConstantAction extends ConstraintConstantAction", "removed": [ "" ] }, { "added": [ "" ], "header": "@@ -226,6 +245,7 @@ public class CreateConstraintConstantAction extends ConstraintConstantAction", "removed": [] }, { "added": [], "header": "@@ -259,10 +279,6 @@ public class CreateConstraintConstantAction extends ConstraintConstantAction", "removed": [ "\t\t// if no constraintId was specified, we should generate one. this handles", "\t\t// the two cases of Source creation and Target replication. At the source", "\t\t// database, we allocate a new UUID. At the Target, we just use the UUID that", "\t\t// the Source sent along." 
] }, { "added": [ "\tboolean isForeignKeyConstraint()" ], "header": "@@ -417,7 +433,7 @@ public class CreateConstraintConstantAction extends ConstraintConstantAction", "removed": [ "\tpublic boolean isForeignKeyConstraint()" ] }, { "added": [], "header": "@@ -475,12 +491,6 @@ public class CreateConstraintConstantAction extends ConstraintConstantAction", "removed": [ "\t/**", "\t *\tGet the names of the columns touched by this constraint.", "\t *", "\t *\t@return\tthe array of touched column names.", "\t */", " public\tString[]\tgetColumnNames() { return columnNames; }" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/CreateIndexConstantAction.java", "hunks": [ { "added": [ " * ConstantAction to create an index either through", " * a CREATE INDEX statement or as a backing index to", " * a constraint." ], "header": "@@ -61,9 +61,9 @@ import org.apache.derby.iapi.types.RowLocation;", "removed": [ " *\tThis class describes actions that are ALWAYS performed for a", " *\tCREATE TABLE Statement at Execution time.", " *" ] }, { "added": [], "header": "@@ -71,7 +71,6 @@ class CreateIndexConstantAction extends IndexConstantAction", "removed": [ "\tprivate long\t\t\tconglomId;" ] }, { "added": [ "\t *\tMake the ConstantAction to create an index." ], "header": "@@ -83,7 +82,7 @@ class CreateIndexConstantAction extends IndexConstantAction", "removed": [ "\t *\tMake the ConstantAction for a CREATE INDEX statement." ] }, { "added": [], "header": "@@ -91,7 +90,6 @@ class CreateIndexConstantAction extends IndexConstantAction", "removed": [ "\t * @param conglomId\tConglomerate ID of the index, if known in advance" ] }, { "added": [], "header": "@@ -105,7 +103,6 @@ class CreateIndexConstantAction extends IndexConstantAction", "removed": [ "\t\t\t\t\t\t\t\tlong\t\t\tconglomId," ] }, { "added": [], "header": "@@ -115,7 +112,6 @@ class CreateIndexConstantAction extends IndexConstantAction", "removed": [ "\t\tthis.conglomId= conglomId;" ] }, { "added": [ "\t *\tThis is the guts of the Execution-time logic for ", " * creating an index.", " *", " * <P>", " * A index is represented as:", " * <UL>", " * <LI> ConglomerateDescriptor.", " * </UL>", " * No dependencies are created.", " \t *", " * @see ConglomerateDescriptor", " * @see SchemaDescriptor" ], "header": "@@ -140,8 +136,18 @@ class CreateIndexConstantAction extends IndexConstantAction", "removed": [ "\t *\tThis is the guts of the Execution-time logic for CREATE INDEX.", "\t *" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/DropIndexConstantAction.java", "hunks": [ { "added": [], "header": "@@ -113,7 +113,6 @@ class DropIndexConstantAction extends IndexConstantAction", "removed": [ "\t\tDependencyManager dm = dd.getDependencyManager();" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/GenericConstantActionFactory.java", "hunks": [ { "added": [], "header": "@@ -198,7 +198,6 @@ public class GenericConstantActionFactory", "removed": [ "\t * @param conglomId\tConglomerate ID of the index, if known in advance" ] }, { "added": [], "header": "@@ -213,7 +212,6 @@ public class GenericConstantActionFactory", "removed": [ "\t\tlong\t\t\tconglomId," ] }, { "added": [ "\t\t\t columnNames, isAscending, isConstraint," ], "header": "@@ -223,7 +221,7 @@ public class GenericConstantActionFactory", "removed": [ "\t\t\t conglomId, columnNames, isAscending, isConstraint," ] } ] } ]
derby-DERBY-1348-945a0cca
DERBY-1348: hack to propagate derbyTesting.jar.path from system->suite->test. Harness' use of properties really needs cleaning up. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@409534 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/functionTests/harness/RunList.java", "hunks": [ { "added": [ "\tstatic String upgradejarpath;\t// Encoding used for child jvm and to read the test output ", " static String derbyTestingXaSingle;// Run junit test cases with under ", " // single branck xa transaction" ], "header": "@@ -119,8 +119,9 @@ public class RunList", "removed": [ " static String derbyTestingXaSingle;// Run junit test cases with under ", " // single branck xa transaction" ] }, { "added": [ " if (upgradejarpath != null)", " jvmProps.addElement(\"derbyTesting.jar.path=\" + upgradejarpath);" ], "header": "@@ -419,7 +420,8 @@ public class RunList", "removed": [ "" ] }, { "added": [ " upgradejarpath = parentProps.getProperty(\"derbyTesting.jar.path\");" ], "header": "@@ -651,6 +653,7 @@ public class RunList", "removed": [] }, { "added": [ "\t\tupgradejarpath = suiteProperties.getProperty(\"derbyTesting.jar.path\");" ], "header": "@@ -781,6 +784,7 @@ public class RunList", "removed": [] } ] } ]
derby-DERBY-1354-5d11c1f4
DERBY-1245 Add o.a.derby.client.am.ClobWriter test coverage. DERBY-1354 Writer.write(int c) to writer from Clob.setCharacterStream(long pos) appends integer value, not character. Contributed by Anders Morken git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@409688 13f79535-47bb-0310-9956-ffa450edef68
[]
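The diff for this row is not captured above, but the bug it names is a classic pitfall when implementing java.io.Writer: write(int c) must treat its argument as a character code rather than appending the numeric value. A minimal sketch of a correct implementation over a StringBuffer (not Derby's actual ClobWriter) is:

    import java.io.IOException;
    import java.io.Writer;

    class StringBufferWriter extends Writer {
        private final StringBuffer buf = new StringBuffer();

        public void write(int c) throws IOException {
            // Wrong: buf.append(c) would append the digits "65" for 'A'.
            // Right: the low 16 bits of c are the character to write.
            buf.append((char) c);
        }

        public void write(char[] cbuf, int off, int len) throws IOException {
            buf.append(cbuf, off, len);
        }

        public void flush() {
        }

        public void close() {
        }
    }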
derby-DERBY-1356-16f19988
DERBY-1356: Positioned update/delete when positioned after last or before first causes NullPointerException. Patch contributed by Fernanda Pizzorno <Fernanda.Pizzorno@Sun.COM>. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@411428 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/execute/ScrollInsensitiveResultSet.java", "hunks": [ { "added": [ "\t\tif (currentPosition <= positionInSource && currentPosition > 0) {", "\t\t\tpositionInHashTable.setValue(currentPosition);", "\t\t\tDataValueDescriptor[] hashRowArray = (DataValueDescriptor[]) ", "\t\t\t\t\tht.get(positionInHashTable);", "\t\t\treturn hashRowArray[POS_ROWDELETED].getBoolean();", "\t\t}", "\t\treturn false;" ], "header": "@@ -1124,10 +1124,13 @@ public class ScrollInsensitiveResultSet extends NoPutResultSetImpl", "removed": [ "\t\tpositionInHashTable.setValue(currentPosition);", "\t\tDataValueDescriptor[] hashRowArray = (DataValueDescriptor[]) ", "\t\t\t\tht.get(positionInHashTable);", "\t\treturn hashRowArray[POS_ROWDELETED].getBoolean();" ] } ] } ]
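Restated as a standalone sketch (names simplified from the diff above, and the map type is an assumption): when a scrollable cursor sits before the first row or after the last fetched row there is no backing entry to consult, so the deleted-row check must short-circuit to false instead of dereferencing a missing hash-table entry.

    import java.util.Map;

    class RowStatusSketch {
        // currentPosition: 0 means before the first row; positionInSource is the number of
        // rows fetched so far, so only positions 1..positionInSource have entries in rowFlags.
        static boolean isDeleted(Map<Integer, boolean[]> rowFlags,
                                 int currentPosition, int positionInSource) {
            if (currentPosition > 0 && currentPosition <= positionInSource) {
                boolean[] flags = rowFlags.get(Integer.valueOf(currentPosition));
                return flags != null && flags[0]; // flags[0] marks "row deleted"
            }
            return false; // before-first or after-last: nothing to look up, no NPE
        }
    }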