Dataset columns (all string-valued; ranges are observed value lengths):
- defects4j_project: 12 distinct values
- defects4j_bug_id: length 1 to 3
- file_path: length 38 to 95
- bug_start_line: length 1 to 4
- bug_end_line: length 2 to 4
- issue_title: length 13 to 150
- issue_description: length 4 to 8.74k
- original_src: length 44 to 9k
- original_src_wo_comments: length 38 to 5.83k
- fixed_src: length 40 to 9.55k
- fixed_src_wo_comments: length 34 to 5.76k
JacksonCore, bug 3 (src/main/java/com/fasterxml/jackson/core/json/UTF8StreamJsonParser.java, lines 113-127)
_currInputRowStart isn't initialized in the UTF8StreamJsonParser() constructor. The column position will be wrong.
The UTF8StreamJsonParser constructor allows a start position to be specified, but it does not set "_currInputRowStart" to the same value; it stays 0. So when an exception is raised, the column calculation in ParserBase.getCurrentLocation() is wrong: int col = _inputPtr - _currInputRowStart + 1; // 1-based. The affected constructor: public UTF8StreamJsonParser(IOContext ctxt, int features, InputStream in, ObjectCodec codec, BytesToNameCanonicalizer sym, byte[] inputBuffer, int start, int end, boolean bufferRecyclable)
public UTF8StreamJsonParser(IOContext ctxt, int features, InputStream in, ObjectCodec codec, BytesToNameCanonicalizer sym, byte[] inputBuffer, int start, int end, boolean bufferRecyclable) { super(ctxt, features); _inputStream = in; _objectCodec = codec; _symbols = sym; _inputBuffer = inputBuffer; _inputPtr = start; _inputEnd = end; // If we have offset, need to omit that from byte offset, so: _bufferRecyclable = bufferRecyclable; }
public UTF8StreamJsonParser ( IOContext ctxt , int features , InputStream in , ObjectCodec codec , BytesToNameCanonicalizer sym , byte [ ] inputBuffer , int start , int end , boolean bufferRecyclable ) { super ( ctxt , features ) ; _inputStream = in ; _objectCodec = codec ; _symbols = sym ; _inputBuffer = inputBuffer ; _inputPtr = start ; _inputEnd = end ; _bufferRecyclable = bufferRecyclable ; }
public UTF8StreamJsonParser(IOContext ctxt, int features, InputStream in, ObjectCodec codec, BytesToNameCanonicalizer sym, byte[] inputBuffer, int start, int end, boolean bufferRecyclable) { super(ctxt, features); _inputStream = in; _objectCodec = codec; _symbols = sym; _inputBuffer = inputBuffer; _inputPtr = start; _inputEnd = end; _currInputRowStart = start; // If we have offset, need to omit that from byte offset, so: _currInputProcessed = -start; _bufferRecyclable = bufferRecyclable; }
public UTF8StreamJsonParser ( IOContext ctxt , int features , InputStream in , ObjectCodec codec , BytesToNameCanonicalizer sym , byte [ ] inputBuffer , int start , int end , boolean bufferRecyclable ) { super ( ctxt , features ) ; _inputStream = in ; _objectCodec = codec ; _symbols = sym ; _inputBuffer = inputBuffer ; _inputPtr = start ; _inputEnd = end ; _currInputRowStart = start ; _currInputProcessed = - start ; _bufferRecyclable = bufferRecyclable ; }
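A minimal sketch (hypothetical values, plain Java rather than the parser class) of how the uninitialized row start inflates the reported column when the buffer slice begins at a non-zero offset:

```java
public class ColumnOffsetSketch {
    public static void main(String[] args) {
        int start = 5;     // constructor argument: buffer content begins at offset 5
        int inputPtr = 7;  // position reached when an error is reported

        int buggyRowStart = 0;     // _currInputRowStart left at its default
        int fixedRowStart = start; // the fix: _currInputRowStart = start

        // column formula quoted in the report (1-based)
        System.out.println("buggy column: " + (inputPtr - buggyRowStart + 1)); // 8
        System.out.println("fixed column: " + (inputPtr - fixedRowStart + 1)); // 3
    }
}
```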
JacksonDatabind, bug 43 (src/main/java/com/fasterxml/jackson/databind/deser/impl/ObjectIdValueProperty.java, lines 74-96)
Problem with Object id handling, explicit `null` token
According to #742, it shouldn't throw an exception if the value of the property is null
@Override public Object deserializeSetAndReturn(JsonParser p, DeserializationContext ctxt, Object instance) throws IOException { Object id = _valueDeserializer.deserialize(p, ctxt); /* 02-Apr-2015, tatu: Actually, as per [databind#742], let it be; * missing or null id is needed for some cases, such as cases where id * will be generated externally, at a later point, and is not available * quite yet. Typical use case is with DB inserts. */ // note: no null checks (unlike usually); deserializer should fail if one found if (id == null) { return null; } ReadableObjectId roid = ctxt.findObjectId(id, _objectIdReader.generator, _objectIdReader.resolver); roid.bindItem(instance); // also: may need to set a property value as well SettableBeanProperty idProp = _objectIdReader.idProperty; if (idProp != null) { return idProp.setAndReturn(instance, id); } return instance; }
@ Override public Object deserializeSetAndReturn ( JsonParser p , DeserializationContext ctxt , Object instance ) throws IOException { Object id = _valueDeserializer . deserialize ( p , ctxt ) ; if ( id == null ) { return null ; } ReadableObjectId roid = ctxt . findObjectId ( id , _objectIdReader . generator , _objectIdReader . resolver ) ; roid . bindItem ( instance ) ; SettableBeanProperty idProp = _objectIdReader . idProperty ; if ( idProp != null ) { return idProp . setAndReturn ( instance , id ) ; } return instance ; }
@Override public Object deserializeSetAndReturn(JsonParser p, DeserializationContext ctxt, Object instance) throws IOException { /* 02-Apr-2015, tatu: Actually, as per [databind#742], let it be; * missing or null id is needed for some cases, such as cases where id * will be generated externally, at a later point, and is not available * quite yet. Typical use case is with DB inserts. */ // note: no null checks (unlike usually); deserializer should fail if one found if (p.hasToken(JsonToken.VALUE_NULL)) { return null; } Object id = _valueDeserializer.deserialize(p, ctxt); ReadableObjectId roid = ctxt.findObjectId(id, _objectIdReader.generator, _objectIdReader.resolver); roid.bindItem(instance); // also: may need to set a property value as well SettableBeanProperty idProp = _objectIdReader.idProperty; if (idProp != null) { return idProp.setAndReturn(instance, id); } return instance; }
@ Override public Object deserializeSetAndReturn ( JsonParser p , DeserializationContext ctxt , Object instance ) throws IOException { if ( p . hasToken ( JsonToken . VALUE_NULL ) ) { return null ; } Object id = _valueDeserializer . deserialize ( p , ctxt ) ; ReadableObjectId roid = ctxt . findObjectId ( id , _objectIdReader . generator , _objectIdReader . resolver ) ; roid . bindItem ( instance ) ; SettableBeanProperty idProp = _objectIdReader . idProperty ; if ( idProp != null ) { return idProp . setAndReturn ( instance , id ) ; } return instance ; }
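The crux of the fix is ordering: probe for the explicit null token before delegating to the id deserializer, because a custom id deserializer is free to reject null input. A hypothetical stand-in (the string "null" plays the role of JsonToken.VALUE_NULL):

```java
import java.util.function.Function;

public class NullCheckOrderSketch {
    // stand-in for _valueDeserializer: parses an id, rejects non-numeric input
    static final Function<String, Object> ID_DESER = Integer::valueOf;

    static Object readIdBuggy(String token) {
        return ID_DESER.apply(token);      // throws NumberFormatException on "null" before any check
    }

    static Object readIdFixed(String token) {
        if ("null".equals(token)) {        // mirrors p.hasToken(JsonToken.VALUE_NULL)
            return null;
        }
        return ID_DESER.apply(token);
    }

    public static void main(String[] args) {
        System.out.println(readIdFixed("null"));  // null, handled gracefully
        System.out.println(readIdBuggy("null"));  // NumberFormatException
    }
}
```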
JacksonDatabind, bug 107 (src/main/java/com/fasterxml/jackson/databind/jsontype/impl/TypeDeserializerBase.java, lines 146-199)
`DeserializationProblemHandler.handleUnknownTypeId()` returning `Void.class`, enableDefaultTyping causing NPE
Returning Void.class from com.fasterxml.jackson.databind.deser.HandleUnknowTypeIdTest.testDeserializationWithDeserializationProblemHandler().new DeserializationProblemHandler() {...}.handleUnknownTypeId(DeserializationContext, JavaType, String, TypeIdResolver, String) is causing an NPE in Jackson 2.9. I'll provide a pull request illustrating the issue in a test.
protected final JsonDeserializer<Object> _findDeserializer(DeserializationContext ctxt, String typeId) throws IOException { JsonDeserializer<Object> deser = _deserializers.get(typeId); if (deser == null) { /* As per [databind#305], need to provide contextual info. But for * backwards compatibility, let's start by only supporting this * for base class, not via interface. Later on we can add this * to the interface, assuming deprecation at base class helps. */ JavaType type = _idResolver.typeFromId(ctxt, typeId); if (type == null) { // use the default impl if no type id available: deser = _findDefaultImplDeserializer(ctxt); if (deser == null) { // 10-May-2016, tatu: We may get some help... JavaType actual = _handleUnknownTypeId(ctxt, typeId); if (actual == null) { // what should this be taken to mean? // 17-Jan-2019, tatu: As per [databind#2221], better NOT return `null` but... return null; } // ... would this actually work? deser = ctxt.findContextualValueDeserializer(actual, _property); } } else { /* 16-Dec-2010, tatu: Since nominal type we get here has no (generic) type parameters, * we actually now need to explicitly narrow from base type (which may have parameterization) * using raw type. * * One complication, though; cannot change 'type class' (simple type to container); otherwise * we may try to narrow a SimpleType (Object.class) into MapType (Map.class), losing actual * type in process (getting SimpleType of Map.class which will not work as expected) */ if ((_baseType != null) && _baseType.getClass() == type.getClass()) { /* 09-Aug-2015, tatu: Not sure if the second part of the check makes sense; * but it appears to check that JavaType impl class is the same which is * important for some reason? * Disabling the check will break 2 Enum-related tests. */ // 19-Jun-2016, tatu: As per [databind#1270] we may actually get full // generic type with custom type resolvers. If so, should try to retain them. // Whether this is sufficient to avoid problems remains to be seen, but for // now it should improve things. if (!type.hasGenericTypes()) { type = ctxt.getTypeFactory().constructSpecializedType(_baseType, type.getRawClass()); } } deser = ctxt.findContextualValueDeserializer(type, _property); } _deserializers.put(typeId, deser); } return deser; }
protected final JsonDeserializer < Object > _findDeserializer ( DeserializationContext ctxt , String typeId ) throws IOException { JsonDeserializer < Object > deser = _deserializers . get ( typeId ) ; if ( deser == null ) { JavaType type = _idResolver . typeFromId ( ctxt , typeId ) ; if ( type == null ) { deser = _findDefaultImplDeserializer ( ctxt ) ; if ( deser == null ) { JavaType actual = _handleUnknownTypeId ( ctxt , typeId ) ; if ( actual == null ) { return null ; } deser = ctxt . findContextualValueDeserializer ( actual , _property ) ; } } else { if ( ( _baseType != null ) && _baseType . getClass ( ) == type . getClass ( ) ) { if ( ! type . hasGenericTypes ( ) ) { type = ctxt . getTypeFactory ( ) . constructSpecializedType ( _baseType , type . getRawClass ( ) ) ; } } deser = ctxt . findContextualValueDeserializer ( type , _property ) ; } _deserializers . put ( typeId , deser ) ; } return deser ; }
protected final JsonDeserializer<Object> _findDeserializer(DeserializationContext ctxt, String typeId) throws IOException { JsonDeserializer<Object> deser = _deserializers.get(typeId); if (deser == null) { /* As per [databind#305], need to provide contextual info. But for * backwards compatibility, let's start by only supporting this * for base class, not via interface. Later on we can add this * to the interface, assuming deprecation at base class helps. */ JavaType type = _idResolver.typeFromId(ctxt, typeId); if (type == null) { // use the default impl if no type id available: deser = _findDefaultImplDeserializer(ctxt); if (deser == null) { // 10-May-2016, tatu: We may get some help... JavaType actual = _handleUnknownTypeId(ctxt, typeId); if (actual == null) { // what should this be taken to mean? // 17-Jan-2019, tatu: As per [databind#2221], better NOT return `null` but... return NullifyingDeserializer.instance; } // ... would this actually work? deser = ctxt.findContextualValueDeserializer(actual, _property); } } else { /* 16-Dec-2010, tatu: Since nominal type we get here has no (generic) type parameters, * we actually now need to explicitly narrow from base type (which may have parameterization) * using raw type. * * One complication, though; cannot change 'type class' (simple type to container); otherwise * we may try to narrow a SimpleType (Object.class) into MapType (Map.class), losing actual * type in process (getting SimpleType of Map.class which will not work as expected) */ if ((_baseType != null) && _baseType.getClass() == type.getClass()) { /* 09-Aug-2015, tatu: Not sure if the second part of the check makes sense; * but it appears to check that JavaType impl class is the same which is * important for some reason? * Disabling the check will break 2 Enum-related tests. */ // 19-Jun-2016, tatu: As per [databind#1270] we may actually get full // generic type with custom type resolvers. If so, should try to retain them. // Whether this is sufficient to avoid problems remains to be seen, but for // now it should improve things. if (!type.hasGenericTypes()) { type = ctxt.getTypeFactory().constructSpecializedType(_baseType, type.getRawClass()); } } deser = ctxt.findContextualValueDeserializer(type, _property); } _deserializers.put(typeId, deser); } return deser; }
protected final JsonDeserializer < Object > _findDeserializer ( DeserializationContext ctxt , String typeId ) throws IOException { JsonDeserializer < Object > deser = _deserializers . get ( typeId ) ; if ( deser == null ) { JavaType type = _idResolver . typeFromId ( ctxt , typeId ) ; if ( type == null ) { deser = _findDefaultImplDeserializer ( ctxt ) ; if ( deser == null ) { JavaType actual = _handleUnknownTypeId ( ctxt , typeId ) ; if ( actual == null ) { return NullifyingDeserializer . instance ; } deser = ctxt . findContextualValueDeserializer ( actual , _property ) ; } } else { if ( ( _baseType != null ) && _baseType . getClass ( ) == type . getClass ( ) ) { if ( ! type . hasGenericTypes ( ) ) { type = ctxt . getTypeFactory ( ) . constructSpecializedType ( _baseType , type . getRawClass ( ) ) ; } } deser = ctxt . findContextualValueDeserializer ( type , _property ) ; } _deserializers . put ( typeId , deser ) ; } return deser ; }
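The fix is a null-object pattern: instead of returning null (which downstream code dereferences), return a shared deserializer that consumes the input and yields null. A minimal stand-in, not the Jackson classes:

```java
public class NullObjectSketch {
    interface Deser {
        Object deserialize(String json);
    }

    // analogue of NullifyingDeserializer.instance: swallows content, yields null
    static final Deser NULLIFYING = json -> null;

    static Deser findBuggy() { return null; }       // caller NPEs as soon as it invokes the result
    static Deser findFixed() { return NULLIFYING; } // always safe to invoke

    public static void main(String[] args) {
        System.out.println(findFixed().deserialize("{}"));  // null
        System.out.println(findBuggy().deserialize("{}"));  // NullPointerException
    }
}
```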
Math, bug 26 (src/main/java/org/apache/commons/math3/fraction/Fraction.java, lines 175-238)
Fraction(double, int) constructor strange behaviour
The Fraction constructor Fraction(double, int) takes a double value and an int maximal denominator, and approximates a fraction. When the double value is a large, negative number with many digits in the fractional part, and the maximal denominator is a big, positive integer (in the 100'000s), two distinct bugs can manifest: 1: the constructor returns a positive Fraction. Calling Fraction(-33655.1677817278, 371880) returns the fraction 410517235/243036, which both has the wrong sign and is far away from the absolute value of the given value. 2: the constructor does not manage to reduce the Fraction properly. Calling Fraction(-43979.60679604749, 366081) returns the fraction -1651878166/256677, which should have* been reduced to -24654898/3831. I have, as yet, not found a solution. The constructor looks like this: public Fraction(double value, int maxDenominator) throws FractionConversionException { this(value, 0, maxDenominator, 100); } Increasing the 100 value (max iterations) does not fix the problem for all cases. Changing the 0-value (the epsilon, the maximum allowed error) to something small does not work either, as this breaks the tests in FractionTest. The problem is not necessarily that the algorithm is unable to approximate a fraction correctly. A solution where a FractionConversionException is thrown in each of these examples would probably be the best, if an improvement on the approximation algorithm turns out to be hard to find. This bug has been found when trying to explore the idea of axiom-based testing (http://bldl.ii.uib.no/testing.html). Attached is a java test class FractionTestByAxiom (junit, goes into org.apache.commons.math3.fraction) which shows these bugs through a simplified approach to this kind of testing, and a text file describing some of the value/maxDenominator combinations which cause one of these failures. * It is never specified in the documentation that the Fraction class guarantees that completely reduced rational numbers are constructed, but a comment inside the equals method claims that "since fractions are always in lowest terms, numerators and denominators can be compared directly for equality", so it seems like this is the intention.
private Fraction(double value, double epsilon, int maxDenominator, int maxIterations) throws FractionConversionException { long overflow = Integer.MAX_VALUE; double r0 = value; long a0 = (long)FastMath.floor(r0); if (a0 > overflow) { throw new FractionConversionException(value, a0, 1l); } // check for (almost) integer arguments, which should not go // to iterations. if (FastMath.abs(a0 - value) < epsilon) { this.numerator = (int) a0; this.denominator = 1; return; } long p0 = 1; long q0 = 0; long p1 = a0; long q1 = 1; long p2 = 0; long q2 = 1; int n = 0; boolean stop = false; do { ++n; double r1 = 1.0 / (r0 - a0); long a1 = (long)FastMath.floor(r1); p2 = (a1 * p1) + p0; q2 = (a1 * q1) + q0; if ((p2 > overflow) || (q2 > overflow)) { throw new FractionConversionException(value, p2, q2); } double convergent = (double)p2 / (double)q2; if (n < maxIterations && FastMath.abs(convergent - value) > epsilon && q2 < maxDenominator) { p0 = p1; p1 = p2; q0 = q1; q1 = q2; a0 = a1; r0 = r1; } else { stop = true; } } while (!stop); if (n >= maxIterations) { throw new FractionConversionException(value, maxIterations); } if (q2 < maxDenominator) { this.numerator = (int) p2; this.denominator = (int) q2; } else { this.numerator = (int) p1; this.denominator = (int) q1; } }
private Fraction ( double value , double epsilon , int maxDenominator , int maxIterations ) throws FractionConversionException { long overflow = Integer . MAX_VALUE ; double r0 = value ; long a0 = ( long ) FastMath . floor ( r0 ) ; if ( a0 > overflow ) { throw new FractionConversionException ( value , a0 , 1l ) ; } if ( FastMath . abs ( a0 - value ) < epsilon ) { this . numerator = ( int ) a0 ; this . denominator = 1 ; return ; } long p0 = 1 ; long q0 = 0 ; long p1 = a0 ; long q1 = 1 ; long p2 = 0 ; long q2 = 1 ; int n = 0 ; boolean stop = false ; do { ++ n ; double r1 = 1.0 / ( r0 - a0 ) ; long a1 = ( long ) FastMath . floor ( r1 ) ; p2 = ( a1 * p1 ) + p0 ; q2 = ( a1 * q1 ) + q0 ; if ( ( p2 > overflow ) || ( q2 > overflow ) ) { throw new FractionConversionException ( value , p2 , q2 ) ; } double convergent = ( double ) p2 / ( double ) q2 ; if ( n < maxIterations && FastMath . abs ( convergent - value ) > epsilon && q2 < maxDenominator ) { p0 = p1 ; p1 = p2 ; q0 = q1 ; q1 = q2 ; a0 = a1 ; r0 = r1 ; } else { stop = true ; } } while ( ! stop ) ; if ( n >= maxIterations ) { throw new FractionConversionException ( value , maxIterations ) ; } if ( q2 < maxDenominator ) { this . numerator = ( int ) p2 ; this . denominator = ( int ) q2 ; } else { this . numerator = ( int ) p1 ; this . denominator = ( int ) q1 ; } }
private Fraction(double value, double epsilon, int maxDenominator, int maxIterations) throws FractionConversionException { long overflow = Integer.MAX_VALUE; double r0 = value; long a0 = (long)FastMath.floor(r0); if (FastMath.abs(a0) > overflow) { throw new FractionConversionException(value, a0, 1l); } // check for (almost) integer arguments, which should not go // to iterations. if (FastMath.abs(a0 - value) < epsilon) { this.numerator = (int) a0; this.denominator = 1; return; } long p0 = 1; long q0 = 0; long p1 = a0; long q1 = 1; long p2 = 0; long q2 = 1; int n = 0; boolean stop = false; do { ++n; double r1 = 1.0 / (r0 - a0); long a1 = (long)FastMath.floor(r1); p2 = (a1 * p1) + p0; q2 = (a1 * q1) + q0; if ((FastMath.abs(p2) > overflow) || (FastMath.abs(q2) > overflow)) { throw new FractionConversionException(value, p2, q2); } double convergent = (double)p2 / (double)q2; if (n < maxIterations && FastMath.abs(convergent - value) > epsilon && q2 < maxDenominator) { p0 = p1; p1 = p2; q0 = q1; q1 = q2; a0 = a1; r0 = r1; } else { stop = true; } } while (!stop); if (n >= maxIterations) { throw new FractionConversionException(value, maxIterations); } if (q2 < maxDenominator) { this.numerator = (int) p2; this.denominator = (int) q2; } else { this.numerator = (int) p1; this.denominator = (int) q1; } }
private Fraction ( double value , double epsilon , int maxDenominator , int maxIterations ) throws FractionConversionException { long overflow = Integer . MAX_VALUE ; double r0 = value ; long a0 = ( long ) FastMath . floor ( r0 ) ; if ( FastMath . abs ( a0 ) > overflow ) { throw new FractionConversionException ( value , a0 , 1l ) ; } if ( FastMath . abs ( a0 - value ) < epsilon ) { this . numerator = ( int ) a0 ; this . denominator = 1 ; return ; } long p0 = 1 ; long q0 = 0 ; long p1 = a0 ; long q1 = 1 ; long p2 = 0 ; long q2 = 1 ; int n = 0 ; boolean stop = false ; do { ++ n ; double r1 = 1.0 / ( r0 - a0 ) ; long a1 = ( long ) FastMath . floor ( r1 ) ; p2 = ( a1 * p1 ) + p0 ; q2 = ( a1 * q1 ) + q0 ; if ( ( FastMath . abs ( p2 ) > overflow ) || ( FastMath . abs ( q2 ) > overflow ) ) { throw new FractionConversionException ( value , p2 , q2 ) ; } double convergent = ( double ) p2 / ( double ) q2 ; if ( n < maxIterations && FastMath . abs ( convergent - value ) > epsilon && q2 < maxDenominator ) { p0 = p1 ; p1 = p2 ; q0 = q1 ; q1 = q2 ; a0 = a1 ; r0 = r1 ; } else { stop = true ; } } while ( ! stop ) ; if ( n >= maxIterations ) { throw new FractionConversionException ( value , maxIterations ) ; } if ( q2 < maxDenominator ) { this . numerator = ( int ) p2 ; this . denominator = ( int ) q2 ; } else { this . numerator = ( int ) p1 ; this . denominator = ( int ) q1 ; } }
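Two pieces of arithmetic behind the fix, runnable as-is: for negative inputs the convergent terms go negative, so the unsigned guard `p2 > overflow` never fires, and the later narrowing cast flips the sign, which matches the report's spurious positive fractions. The value below is hypothetical but representative:

```java
public class NegativeOverflowSketch {
    public static void main(String[] args) {
        long overflow = Integer.MAX_VALUE;
        long p2 = -3_000_000_000L; // hypothetical convergent numerator for a negative input

        System.out.println(p2 > overflow);            // false: the original guard misses it
        System.out.println(Math.abs(p2) > overflow);  // true: the fixed guard catches it
        System.out.println((int) p2);                 // 1294967296: narrowing flips the sign
    }
}
```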
JacksonDatabind, bug 14 (src/main/java/com/fasterxml/jackson/databind/ObjectReader.java, lines 1468-1486)
Custom deserializer with parent object update
Hi, I have a custom deserializer for `DataA`. An instance of `DataA` is contained in `DataB`. When updating an existing instance of `DataB` (as opposed to creating a new one), I get an exception when deserializing via a `JsonNode` object (deserializing via a `String` object works).

``` java
import java.io.IOException;

import org.junit.Assert;
import org.junit.Test;

import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.JsonToken;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.deser.std.StdDeserializer;
import com.fasterxml.jackson.databind.module.SimpleModule;

public class TestDeserTest {

    static class DataA {
        public int i = 1;
        public int j = 2;
    }

    static class DataB {
        public DataA da = new DataA();
        public int k = 3;
    }

    static class DataADeserializer extends StdDeserializer<DataA> {
        private static final long serialVersionUID = 1L;

        DataADeserializer() {
            super(DataA.class);
        }

        public DataA deserialize(JsonParser jp, DeserializationContext ctxt)
                throws JsonProcessingException, IOException {
            assert (jp.getCurrentToken() == JsonToken.START_OBJECT);
            JsonNode node = jp.getCodec().readTree(jp);
            DataA da = new DataA();
            da.i = 5;
            return da;
        }
    }

    @Test
    public void test() throws IOException {
        ObjectMapper mapper = new ObjectMapper();
        SimpleModule module = new SimpleModule();
        module.addDeserializer(DataA.class, new DataADeserializer());
        mapper.registerModule(module);

        DataB db = new DataB();
        db.da.i = 11;
        db.k = 13;
        String jsonBString = mapper.writeValueAsString(db);
        JsonNode jsonBNode = mapper.valueToTree(db);

        // create parent
        DataB dbNewViaString = mapper.readValue(jsonBString, DataB.class);
        Assert.assertEquals(5, dbNewViaString.da.i);
        Assert.assertEquals(13, dbNewViaString.k);

        DataB dbNewViaNode = mapper.treeToValue(jsonBNode, DataB.class);
        Assert.assertEquals(5, dbNewViaNode.da.i);
        Assert.assertEquals(13, dbNewViaNode.k);

        // update parent
        DataB dbUpdViaString = new DataB();
        DataB dbUpdViaNode = new DataB();

        Assert.assertEquals(1, dbUpdViaString.da.i);
        Assert.assertEquals(3, dbUpdViaString.k);
        mapper.readerForUpdating(dbUpdViaString).readValue(jsonBString);
        Assert.assertEquals(5, dbUpdViaString.da.i);
        Assert.assertEquals(13, dbUpdViaString.k);

        Assert.assertEquals(1, dbUpdViaNode.da.i);
        Assert.assertEquals(3, dbUpdViaNode.k);
        // FAILS HERE:
        mapper.readerForUpdating(dbUpdViaNode).readValue(jsonBNode);
        Assert.assertEquals(5, dbUpdViaNode.da.i);
        Assert.assertEquals(13, dbUpdViaNode.k);
    }
}
```

The trace:

``` java
com.fasterxml.jackson.databind.exc.UnrecognizedPropertyException: Unrecognized field "i" (class myorg.TestDeserTest$DataB), not marked as ignorable (2 known properties: "da", "k"])
 at [Source: N/A; line: -1, column: -1] (through reference chain: myorg.DataB["da"]->myorg.DataB["i"])
	at com.fasterxml.jackson.databind.exc.UnrecognizedPropertyException.from(UnrecognizedPropertyException.java:51)
	at com.fasterxml.jackson.databind.DeserializationContext.reportUnknownProperty(DeserializationContext.java:817)
	at com.fasterxml.jackson.databind.deser.std.StdDeserializer.handleUnknownProperty(StdDeserializer.java:954)
	at com.fasterxml.jackson.databind.deser.BeanDeserializerBase.handleUnknownProperty(BeanDeserializerBase.java:1324)
	at com.fasterxml.jackson.databind.deser.BeanDeserializerBase.handleUnknownVanilla(BeanDeserializerBase.java:1302)
	at com.fasterxml.jackson.databind.deser.BeanDeserializer.vanillaDeserialize(BeanDeserializer.java:249)
	at com.fasterxml.jackson.databind.deser.BeanDeserializer.deserialize(BeanDeserializer.java:136)
	at com.fasterxml.jackson.databind.ObjectReader._bindAsTree(ObjectReader.java:1478)
	at com.fasterxml.jackson.databind.ObjectReader.readTree(ObjectReader.java:1020)
	at myorg.TestDeserTest$DataADeserializer.deserialize(TestDeserTest.java:39)
	at myorg.TestDeserTest$DataADeserializer.deserialize(TestDeserTest.java:1)
	at com.fasterxml.jackson.databind.deser.SettableBeanProperty.deserialize(SettableBeanProperty.java:523)
	at com.fasterxml.jackson.databind.deser.impl.FieldProperty.deserializeAndSet(FieldProperty.java:101)
	at com.fasterxml.jackson.databind.deser.impl.BeanPropertyMap.findDeserializeAndSet(BeanPropertyMap.java:285)
	at com.fasterxml.jackson.databind.deser.BeanDeserializer.deserialize(BeanDeserializer.java:220)
	at com.fasterxml.jackson.databind.ObjectReader._bindAndClose(ObjectReader.java:1443)
	at com.fasterxml.jackson.databind.ObjectReader.readValue(ObjectReader.java:1154)
	at myorg.TestDeserTest.test(TestDeserTest.java:81)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:50)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:47)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:325)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:78)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57)
	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:290)
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:71)
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:288)
	at org.junit.runners.ParentRunner.access$000(ParentRunner.java:58)
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:268)
	at org.junit.runners.ParentRunner.run(ParentRunner.java:363)
	at org.eclipse.jdt.internal.junit4.runner.JUnit4TestReference.run(JUnit4TestReference.java:50)
	at org.eclipse.jdt.internal.junit.runner.TestExecution.run(TestExecution.java:38)
	at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.runTests(RemoteTestRunner.java:459)
	at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.runTests(RemoteTestRunner.java:675)
	at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.run(RemoteTestRunner.java:382)
	at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.main(RemoteTestRunner.java:192)
```
protected JsonNode _bindAsTree(JsonParser jp) throws IOException { JsonNode result; JsonToken t = _initForReading(jp); if (t == JsonToken.VALUE_NULL || t == JsonToken.END_ARRAY || t == JsonToken.END_OBJECT) { result = NullNode.instance; } else { DeserializationContext ctxt = createDeserializationContext(jp, _config); JsonDeserializer<Object> deser = _findRootDeserializer(ctxt, JSON_NODE_TYPE); if (_unwrapRoot) { result = (JsonNode) _unwrapAndDeserialize(jp, ctxt, JSON_NODE_TYPE, deser); } else { result = (JsonNode) deser.deserialize(jp, ctxt); } } // Need to consume the token too jp.clearCurrentToken(); return result; }
protected JsonNode _bindAsTree ( JsonParser jp ) throws IOException { JsonNode result ; JsonToken t = _initForReading ( jp ) ; if ( t == JsonToken . VALUE_NULL || t == JsonToken . END_ARRAY || t == JsonToken . END_OBJECT ) { result = NullNode . instance ; } else { DeserializationContext ctxt = createDeserializationContext ( jp , _config ) ; JsonDeserializer < Object > deser = _findRootDeserializer ( ctxt , JSON_NODE_TYPE ) ; if ( _unwrapRoot ) { result = ( JsonNode ) _unwrapAndDeserialize ( jp , ctxt , JSON_NODE_TYPE , deser ) ; } else { result = ( JsonNode ) deser . deserialize ( jp , ctxt ) ; } } jp . clearCurrentToken ( ) ; return result ; }
protected JsonNode _bindAsTree(JsonParser jp) throws IOException { JsonNode result; JsonToken t = _initForReading(jp); if (t == JsonToken.VALUE_NULL || t == JsonToken.END_ARRAY || t == JsonToken.END_OBJECT) { result = NullNode.instance; } else { DeserializationContext ctxt = createDeserializationContext(jp, _config); JsonDeserializer<Object> deser = _findTreeDeserializer(ctxt); if (_unwrapRoot) { result = (JsonNode) _unwrapAndDeserialize(jp, ctxt, JSON_NODE_TYPE, deser); } else { result = (JsonNode) deser.deserialize(jp, ctxt); } } // Need to consume the token too jp.clearCurrentToken(); return result; }
protected JsonNode _bindAsTree ( JsonParser jp ) throws IOException { JsonNode result ; JsonToken t = _initForReading ( jp ) ; if ( t == JsonToken . VALUE_NULL || t == JsonToken . END_ARRAY || t == JsonToken . END_OBJECT ) { result = NullNode . instance ; } else { DeserializationContext ctxt = createDeserializationContext ( jp , _config ) ; JsonDeserializer < Object > deser = _findTreeDeserializer ( ctxt ) ; if ( _unwrapRoot ) { result = ( JsonNode ) _unwrapAndDeserialize ( jp , ctxt , JSON_NODE_TYPE , deser ) ; } else { result = ( JsonNode ) deser . deserialize ( jp , ctxt ) ; } } jp . clearCurrentToken ( ) ; return result ; }
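A hypothetical sketch of the lookup distinction the fix makes: under readerForUpdating the "root deserializer" resolves to the deserializer of the instance being updated, so readTree inside a custom deserializer must look up a JsonNode-specific deserializer instead. All names here are stand-ins, not the Jackson internals:

```java
import java.util.HashMap;
import java.util.Map;

public class TreeLookupSketch {
    static class JsonNodeStandIn {}
    static class DataB {}

    static final Map<Class<?>, String> CACHE = new HashMap<>();
    static Class<?> rootType; // pinned to the update target by readerForUpdating

    static String findRootDeserializer() { return CACHE.get(rootType); }              // buggy path
    static String findTreeDeserializer() { return CACHE.get(JsonNodeStandIn.class); } // fixed path

    public static void main(String[] args) {
        CACHE.put(DataB.class, "bean deserializer for DataB");
        CACHE.put(JsonNodeStandIn.class, "JsonNode deserializer");

        rootType = DataB.class; // we are updating an existing DataB
        System.out.println(findRootDeserializer()); // bean deserializer: wrong one for readTree
        System.out.println(findTreeDeserializer()); // JsonNode deserializer
    }
}
```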
Cli, bug 27 (src/java/org/apache/commons/cli/OptionGroup.java, lines 86-106)
Unable to select a pure long option in a group
OptionGroup doesn't play nice with options that have a long name and no short name. If the selected option doesn't have a short name, group.setSelected(option) has no effect.
public void setSelected(Option option) throws AlreadySelectedException { if (option == null) { // reset the option previously selected selected = null; return; } // if no option has already been selected or the // same option is being reselected then set the // selected member variable if (selected == null || selected.equals(option.getOpt())) { selected = option.getOpt(); } else { throw new AlreadySelectedException(this, option); } }
public void setSelected ( Option option ) throws AlreadySelectedException { if ( option == null ) { selected = null ; return ; } if ( selected == null || selected . equals ( option . getOpt ( ) ) ) { selected = option . getOpt ( ) ; } else { throw new AlreadySelectedException ( this , option ) ; } }
public void setSelected(Option option) throws AlreadySelectedException { if (option == null) { // reset the option previously selected selected = null; return; } // if no option has already been selected or the // same option is being reselected then set the // selected member variable if (selected == null || selected.equals(option.getKey())) { selected = option.getKey(); } else { throw new AlreadySelectedException(this, option); } }
public void setSelected ( Option option ) throws AlreadySelectedException { if ( option == null ) { selected = null ; return ; } if ( selected == null || selected . equals ( option . getKey ( ) ) ) { selected = option . getKey ( ) ; } else { throw new AlreadySelectedException ( this , option ) ; } }
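Why the one-token change matters: getOpt() is null for a long-only option, so the original code stored null as the selection, while getKey() falls back to the long name. A plain-Java stand-in for the two accessors (not the Commons CLI classes themselves):

```java
public class LongOnlyOptionSketch {
    static final String OPT = null;          // no short name defined
    static final String LONG_OPT = "export"; // long name only

    static String getOpt() { return OPT; }
    static String getKey() { return OPT != null ? OPT : LONG_OPT; } // getKey()-style fallback

    public static void main(String[] args) {
        String selectedBuggy = getOpt(); // null: the selection silently vanishes
        String selectedFixed = getKey(); // "export"
        System.out.println(selectedBuggy + " vs " + selectedFixed);
    }
}
```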
Math, bug 30 (src/main/java/org/apache/commons/math3/stat/inference/MannWhitneyUTest.java, lines 168-184)
Mann-Whitney U Test Suffers From Integer Overflow With Large Data Sets
When performing a Mann-Whitney U Test on large data sets (the attached test uses two 1500-element sets), intermediate integer values used in calculateAsymptoticPValue can overflow, leading to invalid results, such as p-values of NaN, or incorrect calculations. Attached is a patch, including a test and a fix, which modifies the affected code to use doubles.
private double calculateAsymptoticPValue(final double Umin, final int n1, final int n2) throws ConvergenceException, MaxCountExceededException { final int n1n2prod = n1 * n2; // http://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U#Normal_approximation final double EU = n1n2prod / 2.0; final double VarU = n1n2prod * (n1 + n2 + 1) / 12.0; final double z = (Umin - EU) / FastMath.sqrt(VarU); final NormalDistribution standardNormal = new NormalDistribution(0, 1); return 2 * standardNormal.cumulativeProbability(z); }
private double calculateAsymptoticPValue ( final double Umin , final int n1 , final int n2 ) throws ConvergenceException , MaxCountExceededException { final int n1n2prod = n1 * n2 ; final double EU = n1n2prod / 2.0 ; final double VarU = n1n2prod * ( n1 + n2 + 1 ) / 12.0 ; final double z = ( Umin - EU ) / FastMath . sqrt ( VarU ) ; final NormalDistribution standardNormal = new NormalDistribution ( 0 , 1 ) ; return 2 * standardNormal . cumulativeProbability ( z ) ; }
private double calculateAsymptoticPValue(final double Umin, final int n1, final int n2) throws ConvergenceException, MaxCountExceededException { final double n1n2prod = n1 * n2; // http://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U#Normal_approximation final double EU = n1n2prod / 2.0; final double VarU = n1n2prod * (n1 + n2 + 1) / 12.0; final double z = (Umin - EU) / FastMath.sqrt(VarU); final NormalDistribution standardNormal = new NormalDistribution(0, 1); return 2 * standardNormal.cumulativeProbability(z); }
private double calculateAsymptoticPValue ( final double Umin , final int n1 , final int n2 ) throws ConvergenceException , MaxCountExceededException { final double n1n2prod = n1 * n2 ; final double EU = n1n2prod / 2.0 ; final double VarU = n1n2prod * ( n1 + n2 + 1 ) / 12.0 ; final double z = ( Umin - EU ) / FastMath . sqrt ( VarU ) ; final NormalDistribution standardNormal = new NormalDistribution ( 0 , 1 ) ; return 2 * standardNormal . cumulativeProbability ( z ) ; }
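The overflow is easy to reproduce with the report's sample sizes; the whole fix is promoting n1*n2 to double before the variance product is formed:

```java
public class MannWhitneyOverflowSketch {
    public static void main(String[] args) {
        final int n1 = 1500, n2 = 1500;     // two 1500-element samples, as in the report

        final int prodInt = n1 * n2;        // 2_250_000: still fits in an int
        // 2_250_000 * 3001 = 6_752_250_000 overflows int before the division by 12.0
        System.out.println(prodInt * (n1 + n2 + 1) / 12.0);    // -1.5314038266666666E8 (negative!)

        final double prodDouble = n1 * n2;  // the fix: carry the product as a double
        System.out.println(prodDouble * (n1 + n2 + 1) / 12.0); // 5.626875E8, the correct variance

        // a negative variance then makes sqrt(VarU) NaN, hence the NaN p-values
    }
}
```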
JacksonCore, bug 23 (src/main/java/com/fasterxml/jackson/core/util/DefaultPrettyPrinter.java, lines 253-256)
Make `DefaultPrettyPrinter.createInstance()` fail for sub-classes
Pattern of "blueprint object" (that is, having an instance not used as-is, but that has factory method for creating actual instance) is used by Jackson in couple of places; often for things that implement `Instantiatable`. But one problem is that unless method is left abstract, sub-classing can be problematic -- if sub-class does not override method, then calls will result in an instance of wrong type being created. And this is what can easily happen with `DefaultPrettyPrinter`. A simple solution is for base class to make explicit that if base implementation is called, then instance can not be a sub-class (that is, it is only legal to call on `DefaultPrettyPrinter`, but no sub-class). This is not optimal (ideally check would be done compile-time), but better than getting a mysterious failure.
@Override public DefaultPrettyPrinter createInstance() { return new DefaultPrettyPrinter(this); }
@ Override public DefaultPrettyPrinter createInstance ( ) { return new DefaultPrettyPrinter ( this ) ; }
@Override public DefaultPrettyPrinter createInstance() { if (getClass() != DefaultPrettyPrinter.class) { // since 2.10 throw new IllegalStateException("Failed `createInstance()`: "+getClass().getName() +" does not override method; it has to"); } return new DefaultPrettyPrinter(this); }
@ Override public DefaultPrettyPrinter createInstance ( ) { if ( getClass ( ) != DefaultPrettyPrinter . class ) { throw new IllegalStateException ( "Failed `createInstance()`: " + getClass ( ) . getName ( ) + " does not override method; it has to" ) ; } return new DefaultPrettyPrinter ( this ) ; }
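The same guard, reduced to its essentials: a blueprint base class that refuses to run its factory method on behalf of a subclass that forgot to override it. Class names here are illustrative:

```java
public class BlueprintGuardSketch {
    static class Base {
        Base createInstance() {
            if (getClass() != Base.class) { // the guard the fix introduces
                throw new IllegalStateException(
                        getClass().getName() + " does not override createInstance(); it has to");
            }
            return new Base();
        }
    }

    static class Custom extends Base { /* forgot to override createInstance() */ }

    public static void main(String[] args) {
        System.out.println(new Base().createInstance().getClass().getSimpleName()); // Base
        new Custom().createInstance(); // fails fast instead of silently producing a Base
    }
}
```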
Math, bug 88 (src/java/org/apache/commons/math/optimization/linear/SimplexTableau.java, lines 324-345)
Simplex Solver arrives at incorrect solution
I have reduced the problem reported to me down to a minimal test case which I will attach.
protected RealPointValuePair getSolution() { double[] coefficients = new double[getOriginalNumDecisionVariables()]; Integer basicRow = getBasicRow(getNumObjectiveFunctions() + getOriginalNumDecisionVariables()); double mostNegative = basicRow == null ? 0 : getEntry(basicRow, getRhsOffset()); for (int i = 0; i < coefficients.length; i++) { basicRow = getBasicRow(getNumObjectiveFunctions() + i); // if multiple variables can take a given value // then we choose the first and set the rest equal to 0 coefficients[i] = (basicRow == null ? 0 : getEntry(basicRow, getRhsOffset())) - (restrictToNonNegative ? 0 : mostNegative); if (basicRow != null) { for (int j = getNumObjectiveFunctions(); j < getNumObjectiveFunctions() + i; j++) { if (tableau.getEntry(basicRow, j) == 1) { coefficients[i] = 0; } } } } return new RealPointValuePair(coefficients, f.getValue(coefficients)); }
protected RealPointValuePair getSolution ( ) { double [ ] coefficients = new double [ getOriginalNumDecisionVariables ( ) ] ; Integer basicRow = getBasicRow ( getNumObjectiveFunctions ( ) + getOriginalNumDecisionVariables ( ) ) ; double mostNegative = basicRow == null ? 0 : getEntry ( basicRow , getRhsOffset ( ) ) ; for ( int i = 0 ; i < coefficients . length ; i ++ ) { basicRow = getBasicRow ( getNumObjectiveFunctions ( ) + i ) ; coefficients [ i ] = ( basicRow == null ? 0 : getEntry ( basicRow , getRhsOffset ( ) ) ) - ( restrictToNonNegative ? 0 : mostNegative ) ; if ( basicRow != null ) { for ( int j = getNumObjectiveFunctions ( ) ; j < getNumObjectiveFunctions ( ) + i ; j ++ ) { if ( tableau . getEntry ( basicRow , j ) == 1 ) { coefficients [ i ] = 0 ; } } } } return new RealPointValuePair ( coefficients , f . getValue ( coefficients ) ) ; }
protected RealPointValuePair getSolution() { double[] coefficients = new double[getOriginalNumDecisionVariables()]; Integer basicRow = getBasicRow(getNumObjectiveFunctions() + getOriginalNumDecisionVariables()); double mostNegative = basicRow == null ? 0 : getEntry(basicRow, getRhsOffset()); Set<Integer> basicRows = new HashSet<Integer>(); for (int i = 0; i < coefficients.length; i++) { basicRow = getBasicRow(getNumObjectiveFunctions() + i); if (basicRows.contains(basicRow)) { // if multiple variables can take a given value // then we choose the first and set the rest equal to 0 coefficients[i] = 0; } else { basicRows.add(basicRow); coefficients[i] = (basicRow == null ? 0 : getEntry(basicRow, getRhsOffset())) - (restrictToNonNegative ? 0 : mostNegative); } } return new RealPointValuePair(coefficients, f.getValue(coefficients)); }
protected RealPointValuePair getSolution ( ) { double [ ] coefficients = new double [ getOriginalNumDecisionVariables ( ) ] ; Integer basicRow = getBasicRow ( getNumObjectiveFunctions ( ) + getOriginalNumDecisionVariables ( ) ) ; double mostNegative = basicRow == null ? 0 : getEntry ( basicRow , getRhsOffset ( ) ) ; Set < Integer > basicRows = new HashSet < Integer > ( ) ; for ( int i = 0 ; i < coefficients . length ; i ++ ) { basicRow = getBasicRow ( getNumObjectiveFunctions ( ) + i ) ; if ( basicRows . contains ( basicRow ) ) { coefficients [ i ] = 0 ; } else { basicRows . add ( basicRow ) ; coefficients [ i ] = ( basicRow == null ? 0 : getEntry ( basicRow , getRhsOffset ( ) ) ) - ( restrictToNonNegative ? 0 : mostNegative ) ; } } return new RealPointValuePair ( coefficients , f . getValue ( coefficients ) ) ; }
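A reduced sketch of the double-counting the fix prevents: when two decision variables resolve to the same basic row, both used to receive that row's RHS value; the HashSet lets only the first claim it. The numbers below are hypothetical:

```java
import java.util.HashSet;
import java.util.Set;

public class BasicRowClaimSketch {
    public static void main(String[] args) {
        Integer[] basicRowOf = { 2, 2 };  // both variables map to basic row 2
        double rhs = 7.0;                 // that row's right-hand side

        double[] buggy = new double[2];
        for (int i = 0; i < buggy.length; i++) {
            buggy[i] = (basicRowOf[i] == null) ? 0 : rhs; // both get 7.0
        }

        double[] fixed = new double[2];
        Set<Integer> claimed = new HashSet<>();
        for (int i = 0; i < fixed.length; i++) {
            fixed[i] = claimed.add(basicRowOf[i]) ? rhs : 0; // first claimant wins
        }

        System.out.println(buggy[0] + ", " + buggy[1]); // 7.0, 7.0 -- double-counted
        System.out.println(fixed[0] + ", " + fixed[1]); // 7.0, 0.0
    }
}
```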
Csv, bug 9 (src/main/java/org/apache/commons/csv/CSVRecord.java, lines 179-187)
CSVRecord.toMap() throws NPE on formats with no headers.
The method toMap() on CSVRecord throws a NullPointerException when called on records derived from a format with no headers. The method documentation states a null map should be returned instead.
<M extends Map<String, String>> M putIn(final M map) { for (final Entry<String, Integer> entry : mapping.entrySet()) { final int col = entry.getValue().intValue(); if (col < values.length) { map.put(entry.getKey(), values[col]); } } return map; }
< M extends Map < String , String > > M putIn ( final M map ) { for ( final Entry < String , Integer > entry : mapping . entrySet ( ) ) { final int col = entry . getValue ( ) . intValue ( ) ; if ( col < values . length ) { map . put ( entry . getKey ( ) , values [ col ] ) ; } } return map ; }
<M extends Map<String, String>> M putIn(final M map) { if (mapping == null) { return map; } for (final Entry<String, Integer> entry : mapping.entrySet()) { final int col = entry.getValue().intValue(); if (col < values.length) { map.put(entry.getKey(), values[col]); } } return map; }
< M extends Map < String , String > > M putIn ( final M map ) { if ( mapping == null ) { return map ; } for ( final Entry < String , Integer > entry : mapping . entrySet ( ) ) { final int col = entry . getValue ( ) . intValue ( ) ; if ( col < values . length ) { map . put ( entry . getKey ( ) , values [ col ] ) ; } } return map ; }
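Assuming Commons CSV on the classpath, a small reproduction of the scenario the guard handles: a headerless format leaves the column mapping null, and toMap() used to NPE instead of returning an empty map:

```java
import java.io.StringReader;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVRecord;

public class ToMapNoHeaderSketch {
    public static void main(String[] args) throws Exception {
        // DEFAULT declares no header, so the record has no name-to-column mapping
        CSVRecord rec = CSVFormat.DEFAULT.parse(new StringReader("a,b,c")).iterator().next();
        System.out.println(rec.toMap()); // {} with the guard; NullPointerException before it
    }
}
```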
JacksonDatabind, bug 55 (src/main/java/com/fasterxml/jackson/databind/ser/std/StdKeySerializers.java, lines 67-86)
EnumMap keys not using the enum's `@JsonProperty` values, unlike Enum values
Based on these issues: https://github.com/FasterXML/jackson-databind/issues/677 https://github.com/FasterXML/jackson-databind/issues/1148 https://github.com/FasterXML/jackson-annotations/issues/96 I implemented @JsonProperty for my enum constants and they show up nicely when they are property values. But I also have an EnumMap which uses the enum, and its generated JSON uses the original enum names for the keys and not the @JsonProperty values. Using 2.8.1 (in spring boot 4.3.2) Thanks!
@SuppressWarnings("unchecked") public static JsonSerializer<Object> getFallbackKeySerializer(SerializationConfig config, Class<?> rawKeyType) { if (rawKeyType != null) { // 29-Sep-2015, tatu: Odd case here, of `Enum`, which we may get for `EnumMap`; not sure // if that is a bug or feature. Regardless, it seems to require dynamic handling // (compared to getting actual fully typed Enum). // Note that this might even work from the earlier point, but let's play it safe for now // 11-Aug-2016, tatu: Turns out we get this if `EnumMap` is the root value because // then there is no static type if (rawKeyType == Enum.class) { return new Dynamic(); } if (rawKeyType.isEnum()) { return new Default(Default.TYPE_ENUM, rawKeyType); } } return DEFAULT_KEY_SERIALIZER; }
@ SuppressWarnings ( "unchecked" ) public static JsonSerializer < Object > getFallbackKeySerializer ( SerializationConfig config , Class < ? > rawKeyType ) { if ( rawKeyType != null ) { if ( rawKeyType == Enum . class ) { return new Dynamic ( ) ; } if ( rawKeyType . isEnum ( ) ) { return new Default ( Default . TYPE_ENUM , rawKeyType ) ; } } return DEFAULT_KEY_SERIALIZER ; }
@SuppressWarnings("unchecked") public static JsonSerializer<Object> getFallbackKeySerializer(SerializationConfig config, Class<?> rawKeyType) { if (rawKeyType != null) { // 29-Sep-2015, tatu: Odd case here, of `Enum`, which we may get for `EnumMap`; not sure // if that is a bug or feature. Regardless, it seems to require dynamic handling // (compared to getting actual fully typed Enum). // Note that this might even work from the earlier point, but let's play it safe for now // 11-Aug-2016, tatu: Turns out we get this if `EnumMap` is the root value because // then there is no static type if (rawKeyType == Enum.class) { return new Dynamic(); } if (rawKeyType.isEnum()) { return EnumKeySerializer.construct(rawKeyType, EnumValues.constructFromName(config, (Class<Enum<?>>) rawKeyType)); } } return DEFAULT_KEY_SERIALIZER; }
@ SuppressWarnings ( "unchecked" ) public static JsonSerializer < Object > getFallbackKeySerializer ( SerializationConfig config , Class < ? > rawKeyType ) { if ( rawKeyType != null ) { if ( rawKeyType == Enum . class ) { return new Dynamic ( ) ; } if ( rawKeyType . isEnum ( ) ) { return EnumKeySerializer . construct ( rawKeyType , EnumValues . constructFromName ( config , ( Class < Enum < ? > > ) rawKeyType ) ) ; } } return DEFAULT_KEY_SERIALIZER ; }
Cli, bug 11 (src/java/org/apache/commons/cli/HelpFormatter.java, lines 613-642)
PosixParser interprets "-target opt" as "-t arget opt"
This was posted on the Commons-Developer list and confirmed as a bug.

> Is this a bug? Or am I using this incorrectly?
> I have an option with short and long values. Given code that is
> essentially what is below, with a PosixParser I see results as
> follows:
>
> A command line with just "-t" prints out the results of the catch block (OK)
> A command line with just "-target" prints out the results of the catch block (OK)
> A command line with just "-t foobar.com" prints out "processing selected target: foobar.com" (OK)
> A command line with just "-target foobar.com" prints out "processing selected target: arget" (ERROR?)
>
> =============================================================================
> private static final String OPTION_TARGET = "t";
> private static final String OPTION_TARGET_LONG = "target";
> // ...
> Option generateTarget = new Option(OPTION_TARGET,
>                                    OPTION_TARGET_LONG,
>                                    true,
>                                    "Generate files for the specified target machine");
> // ...
> try {
>     parsedLine = parser.parse(cmdLineOpts, args);
> } catch (ParseException pe) {
>     System.out.println("Invalid command: " + pe.getMessage() + "\n");
>     HelpFormatter hf = new HelpFormatter();
>     hf.printHelp(USAGE, cmdLineOpts);
>     System.exit(-1);
> }
>
> if (parsedLine.hasOption(OPTION_TARGET)) {
>     System.out.println("processing selected target: " +
>         parsedLine.getOptionValue(OPTION_TARGET));
> }

It is a bug but it is due to well-defined behaviour (so that makes me feel a little better about myself ;). To support *special* options (well, I call them special anyway) like -Dsystem.property=value we need to be able to examine the first character of an option. If the first character is itself defined as an Option, then the remainder of the token is used as the value, e.g. 'D' is the token, it is an option, so 'system.property=value' is the argument value for that option. This is the behaviour that we are seeing for your example: 't' is the token, it is an option, so 'arget' is the argument value. I suppose a solution to this could be to have a way to specify properties for parsers, in this case 'posix.special.option == true' for turning on *special* options. I'll have a look into this and let you know. Just to keep track of this and to get you used to how we operate, can you log a bug in Bugzilla for this? Thanks, -John K
private static void appendOption(final StringBuffer buff, final Option option, final boolean required) { if (!required) { buff.append("["); } if (option.getOpt() != null) { buff.append("-").append(option.getOpt()); } else { buff.append("--").append(option.getLongOpt()); } // if the Option has a value if (option.hasArg() && (option.getArgName() != null)) { buff.append(" <").append(option.getArgName()).append(">"); } // if the Option is not a required option if (!required) { buff.append("]"); } }
private static void appendOption ( final StringBuffer buff , final Option option , final boolean required ) { if ( ! required ) { buff . append ( "[" ) ; } if ( option . getOpt ( ) != null ) { buff . append ( "-" ) . append ( option . getOpt ( ) ) ; } else { buff . append ( "--" ) . append ( option . getLongOpt ( ) ) ; } if ( option . hasArg ( ) && ( option . getArgName ( ) != null ) ) { buff . append ( " <" ) . append ( option . getArgName ( ) ) . append ( ">" ) ; } if ( ! required ) { buff . append ( "]" ) ; } }
private static void appendOption(final StringBuffer buff, final Option option, final boolean required) { if (!required) { buff.append("["); } if (option.getOpt() != null) { buff.append("-").append(option.getOpt()); } else { buff.append("--").append(option.getLongOpt()); } // if the Option has a value if (option.hasArg() && option.hasArgName()) { buff.append(" <").append(option.getArgName()).append(">"); } // if the Option is not a required option if (!required) { buff.append("]"); } }
private static void appendOption ( final StringBuffer buff , final Option option , final boolean required ) { if ( ! required ) { buff . append ( "[" ) ; } if ( option . getOpt ( ) != null ) { buff . append ( "-" ) . append ( option . getOpt ( ) ) ; } else { buff . append ( "--" ) . append ( option . getLongOpt ( ) ) ; } if ( option . hasArg ( ) && option . hasArgName ( ) ) { buff . append ( " <" ) . append ( option . getArgName ( ) ) . append ( ">" ) ; } if ( ! required ) { buff . append ( "]" ) ; } }
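The patched usage printer swaps a bare null test for hasArgName(). The null test alone is too weak, because an argument name can be present but empty; a stand-in for the two checks (the default value here is chosen for illustration):

```java
public class ArgNameCheckSketch {
    static final String ARG_NAME = "";  // set but empty: no meaningful name

    // mirrors an hasArgName()-style check: non-null AND non-empty
    static boolean hasArgName() { return ARG_NAME != null && ARG_NAME.length() > 0; }

    public static void main(String[] args) {
        StringBuffer buggy = new StringBuffer("-t");
        if (ARG_NAME != null) {                 // original check
            buggy.append(" <").append(ARG_NAME).append(">");
        }
        StringBuffer fixed = new StringBuffer("-t");
        if (hasArgName()) {                     // patched check
            fixed.append(" <").append(ARG_NAME).append(">");
        }
        System.out.println(buggy); // -t <>
        System.out.println(fixed); // -t
    }
}
```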
Math, bug 10 (src/main/java/org/apache/commons/math3/analysis/differentiation/DSCompiler.java, lines 1382-1420)
DerivativeStructure.atan2(y,x) does not handle special cases properly
The four special cases +/-0 for both x and y should give the same values as Math.atan2 and FastMath.atan2. However, they give NaN for the value in all cases.
public void atan2(final double[] y, final int yOffset, final double[] x, final int xOffset, final double[] result, final int resultOffset) { // compute r = sqrt(x^2+y^2) double[] tmp1 = new double[getSize()]; multiply(x, xOffset, x, xOffset, tmp1, 0); // x^2 double[] tmp2 = new double[getSize()]; multiply(y, yOffset, y, yOffset, tmp2, 0); // y^2 add(tmp1, 0, tmp2, 0, tmp2, 0); // x^2 + y^2 rootN(tmp2, 0, 2, tmp1, 0); // r = sqrt(x^2 + y^2) if (x[xOffset] >= 0) { // compute atan2(y, x) = 2 atan(y / (r + x)) add(tmp1, 0, x, xOffset, tmp2, 0); // r + x divide(y, yOffset, tmp2, 0, tmp1, 0); // y /(r + x) atan(tmp1, 0, tmp2, 0); // atan(y / (r + x)) for (int i = 0; i < tmp2.length; ++i) { result[resultOffset + i] = 2 * tmp2[i]; // 2 * atan(y / (r + x)) } } else { // compute atan2(y, x) = +/- pi - 2 atan(y / (r - x)) subtract(tmp1, 0, x, xOffset, tmp2, 0); // r - x divide(y, yOffset, tmp2, 0, tmp1, 0); // y /(r - x) atan(tmp1, 0, tmp2, 0); // atan(y / (r - x)) result[resultOffset] = ((tmp2[0] <= 0) ? -FastMath.PI : FastMath.PI) - 2 * tmp2[0]; // +/-pi - 2 * atan(y / (r - x)) for (int i = 1; i < tmp2.length; ++i) { result[resultOffset + i] = -2 * tmp2[i]; // +/-pi - 2 * atan(y / (r - x)) } } // fix value to take special cases (+0/+0, +0/-0, -0/+0, -0/-0, +/-infinity) correctly }
public void atan2 ( final double [ ] y , final int yOffset , final double [ ] x , final int xOffset , final double [ ] result , final int resultOffset ) { double [ ] tmp1 = new double [ getSize ( ) ] ; multiply ( x , xOffset , x , xOffset , tmp1 , 0 ) ; double [ ] tmp2 = new double [ getSize ( ) ] ; multiply ( y , yOffset , y , yOffset , tmp2 , 0 ) ; add ( tmp1 , 0 , tmp2 , 0 , tmp2 , 0 ) ; rootN ( tmp2 , 0 , 2 , tmp1 , 0 ) ; if ( x [ xOffset ] >= 0 ) { add ( tmp1 , 0 , x , xOffset , tmp2 , 0 ) ; divide ( y , yOffset , tmp2 , 0 , tmp1 , 0 ) ; atan ( tmp1 , 0 , tmp2 , 0 ) ; for ( int i = 0 ; i < tmp2 . length ; ++ i ) { result [ resultOffset + i ] = 2 * tmp2 [ i ] ; } } else { subtract ( tmp1 , 0 , x , xOffset , tmp2 , 0 ) ; divide ( y , yOffset , tmp2 , 0 , tmp1 , 0 ) ; atan ( tmp1 , 0 , tmp2 , 0 ) ; result [ resultOffset ] = ( ( tmp2 [ 0 ] <= 0 ) ? - FastMath . PI : FastMath . PI ) - 2 * tmp2 [ 0 ] ; for ( int i = 1 ; i < tmp2 . length ; ++ i ) { result [ resultOffset + i ] = - 2 * tmp2 [ i ] ; } } }
public void atan2(final double[] y, final int yOffset, final double[] x, final int xOffset, final double[] result, final int resultOffset) { // compute r = sqrt(x^2+y^2) double[] tmp1 = new double[getSize()]; multiply(x, xOffset, x, xOffset, tmp1, 0); // x^2 double[] tmp2 = new double[getSize()]; multiply(y, yOffset, y, yOffset, tmp2, 0); // y^2 add(tmp1, 0, tmp2, 0, tmp2, 0); // x^2 + y^2 rootN(tmp2, 0, 2, tmp1, 0); // r = sqrt(x^2 + y^2) if (x[xOffset] >= 0) { // compute atan2(y, x) = 2 atan(y / (r + x)) add(tmp1, 0, x, xOffset, tmp2, 0); // r + x divide(y, yOffset, tmp2, 0, tmp1, 0); // y /(r + x) atan(tmp1, 0, tmp2, 0); // atan(y / (r + x)) for (int i = 0; i < tmp2.length; ++i) { result[resultOffset + i] = 2 * tmp2[i]; // 2 * atan(y / (r + x)) } } else { // compute atan2(y, x) = +/- pi - 2 atan(y / (r - x)) subtract(tmp1, 0, x, xOffset, tmp2, 0); // r - x divide(y, yOffset, tmp2, 0, tmp1, 0); // y /(r - x) atan(tmp1, 0, tmp2, 0); // atan(y / (r - x)) result[resultOffset] = ((tmp2[0] <= 0) ? -FastMath.PI : FastMath.PI) - 2 * tmp2[0]; // +/-pi - 2 * atan(y / (r - x)) for (int i = 1; i < tmp2.length; ++i) { result[resultOffset + i] = -2 * tmp2[i]; // +/-pi - 2 * atan(y / (r - x)) } } // fix value to take special cases (+0/+0, +0/-0, -0/+0, -0/-0, +/-infinity) correctly result[resultOffset] = FastMath.atan2(y[yOffset], x[xOffset]); }
public void atan2 ( final double [ ] y , final int yOffset , final double [ ] x , final int xOffset , final double [ ] result , final int resultOffset ) { double [ ] tmp1 = new double [ getSize ( ) ] ; multiply ( x , xOffset , x , xOffset , tmp1 , 0 ) ; double [ ] tmp2 = new double [ getSize ( ) ] ; multiply ( y , yOffset , y , yOffset , tmp2 , 0 ) ; add ( tmp1 , 0 , tmp2 , 0 , tmp2 , 0 ) ; rootN ( tmp2 , 0 , 2 , tmp1 , 0 ) ; if ( x [ xOffset ] >= 0 ) { add ( tmp1 , 0 , x , xOffset , tmp2 , 0 ) ; divide ( y , yOffset , tmp2 , 0 , tmp1 , 0 ) ; atan ( tmp1 , 0 , tmp2 , 0 ) ; for ( int i = 0 ; i < tmp2 . length ; ++ i ) { result [ resultOffset + i ] = 2 * tmp2 [ i ] ; } } else { subtract ( tmp1 , 0 , x , xOffset , tmp2 , 0 ) ; divide ( y , yOffset , tmp2 , 0 , tmp1 , 0 ) ; atan ( tmp1 , 0 , tmp2 , 0 ) ; result [ resultOffset ] = ( ( tmp2 [ 0 ] <= 0 ) ? - FastMath . PI : FastMath . PI ) - 2 * tmp2 [ 0 ] ; for ( int i = 1 ; i < tmp2 . length ; ++ i ) { result [ resultOffset + i ] = - 2 * tmp2 [ i ] ; } } result [ resultOffset ] = FastMath . atan2 ( y [ yOffset ] , x [ xOffset ] ) ; }
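The failing corner is reproducible with plain doubles: the half-angle identity used by the series path divides 0 by 0 at the origin, which is why the fix simply overwrites the order-0 coefficient with the library atan2 of the raw values:

```java
public class Atan2ZeroSketch {
    public static void main(String[] args) {
        double y = +0.0, x = -0.0;

        // series path for x >= 0 (note: -0.0 >= 0 is true): atan2 = 2 * atan(y / (r + x))
        double r = Math.sqrt(x * x + y * y);            // 0.0
        System.out.println(2 * Math.atan(y / (r + x))); // NaN: 0.0 / 0.0

        // the fix: take the value term straight from atan2
        System.out.println(Math.atan2(y, x));           // 3.141592653589793 (pi)
    }
}
```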
JxPath, bug 20 (src/java/org/apache/commons/jxpath/ri/compiler/CoreOperationRelationalExpression.java, lines 71-99)
relational operations do not function properly when comparing a non-Iterator LHS to an Iterator RHS
I have a simple JXpathContext, with the following variables: var1=0, var2=0, var3=1. When I try to evaluate the following expression - "$var1 + $var2 <= $var3", it returns false.
private boolean compute(Object left, Object right) { left = reduce(left); right = reduce(right); if (left instanceof InitialContext) { ((InitialContext) left).reset(); } if (right instanceof InitialContext) { ((InitialContext) right).reset(); } if (left instanceof Iterator && right instanceof Iterator) { return findMatch((Iterator) left, (Iterator) right); } if (left instanceof Iterator) { return containsMatch((Iterator) left, right); } if (right instanceof Iterator) { return containsMatch((Iterator) right, left); } double ld = InfoSetUtil.doubleValue(left); if (Double.isNaN(ld)) { return false; } double rd = InfoSetUtil.doubleValue(right); if (Double.isNaN(rd)) { return false; } return evaluateCompare(ld == rd ? 0 : ld < rd ? -1 : 1); }
private boolean compute ( Object left , Object right ) { left = reduce ( left ) ; right = reduce ( right ) ; if ( left instanceof InitialContext ) { ( ( InitialContext ) left ) . reset ( ) ; } if ( right instanceof InitialContext ) { ( ( InitialContext ) right ) . reset ( ) ; } if ( left instanceof Iterator && right instanceof Iterator ) { return findMatch ( ( Iterator ) left , ( Iterator ) right ) ; } if ( left instanceof Iterator ) { return containsMatch ( ( Iterator ) left , right ) ; } if ( right instanceof Iterator ) { return containsMatch ( ( Iterator ) right , left ) ; } double ld = InfoSetUtil . doubleValue ( left ) ; if ( Double . isNaN ( ld ) ) { return false ; } double rd = InfoSetUtil . doubleValue ( right ) ; if ( Double . isNaN ( rd ) ) { return false ; } return evaluateCompare ( ld == rd ? 0 : ld < rd ? - 1 : 1 ) ; }
private boolean compute(Object left, Object right) { left = reduce(left); right = reduce(right); if (left instanceof InitialContext) { ((InitialContext) left).reset(); } if (right instanceof InitialContext) { ((InitialContext) right).reset(); } if (left instanceof Iterator && right instanceof Iterator) { return findMatch((Iterator) left, (Iterator) right); } if (left instanceof Iterator) { return containsMatch((Iterator) left, right); } if (right instanceof Iterator) { return containsMatch(left, (Iterator) right); } double ld = InfoSetUtil.doubleValue(left); if (Double.isNaN(ld)) { return false; } double rd = InfoSetUtil.doubleValue(right); if (Double.isNaN(rd)) { return false; } return evaluateCompare(ld == rd ? 0 : ld < rd ? -1 : 1); }
private boolean compute ( Object left , Object right ) { left = reduce ( left ) ; right = reduce ( right ) ; if ( left instanceof InitialContext ) { ( ( InitialContext ) left ) . reset ( ) ; } if ( right instanceof InitialContext ) { ( ( InitialContext ) right ) . reset ( ) ; } if ( left instanceof Iterator && right instanceof Iterator ) { return findMatch ( ( Iterator ) left , ( Iterator ) right ) ; } if ( left instanceof Iterator ) { return containsMatch ( ( Iterator ) left , right ) ; } if ( right instanceof Iterator ) { return containsMatch ( left , ( Iterator ) right ) ; } double ld = InfoSetUtil . doubleValue ( left ) ; if ( Double . isNaN ( ld ) ) { return false ; } double rd = InfoSetUtil . doubleValue ( right ) ; if ( Double . isNaN ( rd ) ) { return false ; } return evaluateCompare ( ld == rd ? 0 : ld < rd ? - 1 : 1 ) ; }
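A worked stand-in for the operand swap: compute(left, right) must evaluate left OP right, but the original fallback passed the right-hand iterator into the slot reserved for the left operand, flipping the comparison. With the report's values:

```java
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class OperandOrderSketch {
    // does any element li of the LEFT iterator satisfy li <= right?
    static boolean containsMatch(Iterator<Double> leftIt, double right) {
        while (leftIt.hasNext()) {
            if (leftIt.next() <= right) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        double left = 0.0;                           // $var1 + $var2
        List<Double> right = Arrays.asList(1.0);     // $var3, delivered as a node iterator

        // buggy fallback: iterator forced into the left slot -> evaluates 1 <= 0
        System.out.println(containsMatch(right.iterator(), left)); // false

        // correct orientation: evaluates 0 <= 1
        System.out.println(left <= right.get(0));                  // true
    }
}
```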
JacksonDatabind
63
src/main/java/com/fasterxml/jackson/databind/JsonMappingException.java
119
152
Reference-chain hints use incorrect class-name for inner classes
``` java import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.databind.JsonMappingException; import com.fasterxml.jackson.databind.ObjectMapper; import org.junit.jupiter.api.Test; import java.io.IOException; import static com.google.common.truth.Truth.assertThat; import static org.junit.jupiter.api.Assertions.expectThrows; public class ReferenceChainTest { // illustrates that jackson's "reference chain" help-text uses incorrect class-names for inner classes @Test public void incorrectReferenceChain() throws IOException { JsonMappingException jsonMappingException = expectThrows(JsonMappingException.class, () -> { ObjectMapper objectMapper = new ObjectMapper(); objectMapper.readValue(objectMapper.writeValueAsBytes(new Outer()), Outer.class); }); JsonMappingException.Reference reference = jsonMappingException.getPath().get(0); assertThat(reference.toString()).isEqualTo("ReferenceChainTest$Outer[\"inner\"]"); } static class Outer { public Inner inner = new Inner(); } static class Inner { public int x; @JsonCreator public static Inner create(@JsonProperty("x") int x) { throw new RuntimeException("test-exception"); } } } ```
public String getDescription() { if (_desc == null) { StringBuilder sb = new StringBuilder(); if (_from == null) { // can this ever occur? sb.append("UNKNOWN"); } else { Class<?> cls = (_from instanceof Class<?>) ? (Class<?>)_from : _from.getClass(); // Hmmh. Although Class.getName() is mostly ok, it does look // butt-ugly for arrays. // 06-Oct-2016, tatu: as per [databind#1403], `getSimpleName()` not so good // as it drops enclosing class. So let's try bit different approach String pkgName = ClassUtil.getPackageName(cls); if (pkgName != null) { sb.append(pkgName); sb.append('.'); } sb.append(cls.getSimpleName()); } sb.append('['); if (_fieldName != null) { sb.append('"'); sb.append(_fieldName); sb.append('"'); } else if (_index >= 0) { sb.append(_index); } else { sb.append('?'); } sb.append(']'); _desc = sb.toString(); } return _desc; }
public String getDescription ( ) { if ( _desc == null ) { StringBuilder sb = new StringBuilder ( ) ; if ( _from == null ) { sb . append ( "UNKNOWN" ) ; } else { Class < ? > cls = ( _from instanceof Class < ? > ) ? ( Class < ? > ) _from : _from . getClass ( ) ; String pkgName = ClassUtil . getPackageName ( cls ) ; if ( pkgName != null ) { sb . append ( pkgName ) ; sb . append ( '.' ) ; } sb . append ( cls . getSimpleName ( ) ) ; } sb . append ( '[' ) ; if ( _fieldName != null ) { sb . append ( '"' ) ; sb . append ( _fieldName ) ; sb . append ( '"' ) ; } else if ( _index >= 0 ) { sb . append ( _index ) ; } else { sb . append ( '?' ) ; } sb . append ( ']' ) ; _desc = sb . toString ( ) ; } return _desc ; }
public String getDescription() { if (_desc == null) { StringBuilder sb = new StringBuilder(); if (_from == null) { // can this ever occur? sb.append("UNKNOWN"); } else { Class<?> cls = (_from instanceof Class<?>) ? (Class<?>)_from : _from.getClass(); // Hmmh. Although Class.getName() is mostly ok, it does look // butt-ugly for arrays. // 06-Oct-2016, tatu: as per [databind#1403], `getSimpleName()` not so good // as it drops enclosing class. So let's try bit different approach int arrays = 0; while (cls.isArray()) { cls = cls.getComponentType(); ++arrays; } sb.append(cls.getName()); while (--arrays >= 0) { sb.append("[]"); } /* was: String pkgName = ClassUtil.getPackageName(cls); if (pkgName != null) { sb.append(pkgName); sb.append('.'); } */ } sb.append('['); if (_fieldName != null) { sb.append('"'); sb.append(_fieldName); sb.append('"'); } else if (_index >= 0) { sb.append(_index); } else { sb.append('?'); } sb.append(']'); _desc = sb.toString(); } return _desc; }
public String getDescription ( ) { if ( _desc == null ) { StringBuilder sb = new StringBuilder ( ) ; if ( _from == null ) { sb . append ( "UNKNOWN" ) ; } else { Class < ? > cls = ( _from instanceof Class < ? > ) ? ( Class < ? > ) _from : _from . getClass ( ) ; int arrays = 0 ; while ( cls . isArray ( ) ) { cls = cls . getComponentType ( ) ; ++ arrays ; } sb . append ( cls . getName ( ) ) ; while ( -- arrays >= 0 ) { sb . append ( "[]" ) ; } } sb . append ( '[' ) ; if ( _fieldName != null ) { sb . append ( '"' ) ; sb . append ( _fieldName ) ; sb . append ( '"' ) ; } else if ( _index >= 0 ) { sb . append ( _index ) ; } else { sb . append ( '?' ) ; } sb . append ( ']' ) ; _desc = sb . toString ( ) ; } return _desc ; }
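The root cause is visible with no Jackson involvement at all: Class.getSimpleName() drops the enclosing class, while Class.getName() keeps it (at the cost of the '$' separator and of array notation such as "[[I", which is why the fix unrolls array types by hand before calling getName()). A small plain-Java illustration:

```java
public class NameDemo {
    static class Outer {
        static class Inner { }
    }

    public static void main(String[] args) {
        Class<?> cls = Outer.Inner.class;
        System.out.println(cls.getSimpleName());     // "Inner" -- enclosing class lost
        System.out.println(cls.getName());           // "NameDemo$Outer$Inner"
        System.out.println(int[][].class.getName()); // "[[I" -- why arrays are unrolled
    }
}
```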
Math
51
src/main/java/org/apache/commons/math/analysis/solvers/BaseSecantSolver.java
128
247
"RegulaFalsiSolver" failure
The following unit test: {code} @Test public void testBug() { final UnivariateRealFunction f = new UnivariateRealFunction() { @Override public double value(double x) { return Math.exp(x) - Math.pow(Math.PI, 3.0); } }; UnivariateRealSolver solver = new RegulaFalsiSolver(); double root = solver.solve(100, f, 1, 10); } {code} fails with {noformat} illegal state: maximal count (100) exceeded: evaluations {noformat} Using "PegasusSolver", the answer is found after 17 evaluations.
protected final double doSolve() { // Get initial solution double x0 = getMin(); double x1 = getMax(); double f0 = computeObjectiveValue(x0); double f1 = computeObjectiveValue(x1); // If one of the bounds is the exact root, return it. Since these are // not under-approximations or over-approximations, we can return them // regardless of the allowed solutions. if (f0 == 0.0) { return x0; } if (f1 == 0.0) { return x1; } // Verify bracketing of initial solution. verifyBracketing(x0, x1); // Get accuracies. final double ftol = getFunctionValueAccuracy(); final double atol = getAbsoluteAccuracy(); final double rtol = getRelativeAccuracy(); // Keep track of inverted intervals, meaning that the left bound is // larger than the right bound. boolean inverted = false; // Keep finding better approximations. while (true) { // Calculate the next approximation. final double x = x1 - ((f1 * (x1 - x0)) / (f1 - f0)); final double fx = computeObjectiveValue(x); // If the new approximation is the exact root, return it. Since // this is not an under-approximation or an over-approximation, // we can return it regardless of the allowed solutions. if (fx == 0.0) { return x; } // Update the bounds with the new approximation. if (f1 * fx < 0) { // The value of x1 has switched to the other bound, thus inverting // the interval. x0 = x1; f0 = f1; inverted = !inverted; } else { switch (method) { case ILLINOIS: f0 *= 0.5; break; case PEGASUS: f0 *= f1 / (f1 + fx); break; // Update formula cannot make any progress: Update the // search interval. default: // Should never happen. } } // Update from [x0, x1] to [x0, x]. x1 = x; f1 = fx; // If the function value of the last approximation is too small, // given the function value accuracy, then we can't get closer to // the root than we already are. if (FastMath.abs(f1) <= ftol) { switch (allowed) { case ANY_SIDE: return x1; case LEFT_SIDE: if (inverted) { return x1; } break; case RIGHT_SIDE: if (!inverted) { return x1; } break; case BELOW_SIDE: if (f1 <= 0) { return x1; } break; case ABOVE_SIDE: if (f1 >= 0) { return x1; } break; default: throw new MathInternalError(); } } // If the current interval is within the given accuracies, we // are satisfied with the current approximation. if (FastMath.abs(x1 - x0) < FastMath.max(rtol * FastMath.abs(x1), atol)) { switch (allowed) { case ANY_SIDE: return x1; case LEFT_SIDE: return inverted ? x1 : x0; case RIGHT_SIDE: return inverted ? x0 : x1; case BELOW_SIDE: return (f1 <= 0) ? x1 : x0; case ABOVE_SIDE: return (f1 >= 0) ? x1 : x0; default: throw new MathInternalError(); } } } }
protected final double doSolve ( ) { double x0 = getMin ( ) ; double x1 = getMax ( ) ; double f0 = computeObjectiveValue ( x0 ) ; double f1 = computeObjectiveValue ( x1 ) ; if ( f0 == 0.0 ) { return x0 ; } if ( f1 == 0.0 ) { return x1 ; } verifyBracketing ( x0 , x1 ) ; final double ftol = getFunctionValueAccuracy ( ) ; final double atol = getAbsoluteAccuracy ( ) ; final double rtol = getRelativeAccuracy ( ) ; boolean inverted = false ; while ( true ) { final double x = x1 - ( ( f1 * ( x1 - x0 ) ) / ( f1 - f0 ) ) ; final double fx = computeObjectiveValue ( x ) ; if ( fx == 0.0 ) { return x ; } if ( f1 * fx < 0 ) { x0 = x1 ; f0 = f1 ; inverted = ! inverted ; } else { switch ( method ) { case ILLINOIS : f0 *= 0.5 ; break ; case PEGASUS : f0 *= f1 / ( f1 + fx ) ; break ; default : } } x1 = x ; f1 = fx ; if ( FastMath . abs ( f1 ) <= ftol ) { switch ( allowed ) { case ANY_SIDE : return x1 ; case LEFT_SIDE : if ( inverted ) { return x1 ; } break ; case RIGHT_SIDE : if ( ! inverted ) { return x1 ; } break ; case BELOW_SIDE : if ( f1 <= 0 ) { return x1 ; } break ; case ABOVE_SIDE : if ( f1 >= 0 ) { return x1 ; } break ; default : throw new MathInternalError ( ) ; } } if ( FastMath . abs ( x1 - x0 ) < FastMath . max ( rtol * FastMath . abs ( x1 ) , atol ) ) { switch ( allowed ) { case ANY_SIDE : return x1 ; case LEFT_SIDE : return inverted ? x1 : x0 ; case RIGHT_SIDE : return inverted ? x0 : x1 ; case BELOW_SIDE : return ( f1 <= 0 ) ? x1 : x0 ; case ABOVE_SIDE : return ( f1 >= 0 ) ? x1 : x0 ; default : throw new MathInternalError ( ) ; } } } }
protected final double doSolve() { // Get initial solution double x0 = getMin(); double x1 = getMax(); double f0 = computeObjectiveValue(x0); double f1 = computeObjectiveValue(x1); // If one of the bounds is the exact root, return it. Since these are // not under-approximations or over-approximations, we can return them // regardless of the allowed solutions. if (f0 == 0.0) { return x0; } if (f1 == 0.0) { return x1; } // Verify bracketing of initial solution. verifyBracketing(x0, x1); // Get accuracies. final double ftol = getFunctionValueAccuracy(); final double atol = getAbsoluteAccuracy(); final double rtol = getRelativeAccuracy(); // Keep track of inverted intervals, meaning that the left bound is // larger than the right bound. boolean inverted = false; // Keep finding better approximations. while (true) { // Calculate the next approximation. final double x = x1 - ((f1 * (x1 - x0)) / (f1 - f0)); final double fx = computeObjectiveValue(x); // If the new approximation is the exact root, return it. Since // this is not an under-approximation or an over-approximation, // we can return it regardless of the allowed solutions. if (fx == 0.0) { return x; } // Update the bounds with the new approximation. if (f1 * fx < 0) { // The value of x1 has switched to the other bound, thus inverting // the interval. x0 = x1; f0 = f1; inverted = !inverted; } else { switch (method) { case ILLINOIS: f0 *= 0.5; break; case PEGASUS: f0 *= f1 / (f1 + fx); break; case REGULA_FALSI: if (x == x1) { final double delta = FastMath.max(rtol * FastMath.abs(x1), atol); // Update formula cannot make any progress: Update the // search interval. x0 = 0.5 * (x0 + x1 - delta); f0 = computeObjectiveValue(x0); } break; default: // Should never happen. throw new MathInternalError(); } } // Update from [x0, x1] to [x0, x]. x1 = x; f1 = fx; // If the function value of the last approximation is too small, // given the function value accuracy, then we can't get closer to // the root than we already are. if (FastMath.abs(f1) <= ftol) { switch (allowed) { case ANY_SIDE: return x1; case LEFT_SIDE: if (inverted) { return x1; } break; case RIGHT_SIDE: if (!inverted) { return x1; } break; case BELOW_SIDE: if (f1 <= 0) { return x1; } break; case ABOVE_SIDE: if (f1 >= 0) { return x1; } break; default: throw new MathInternalError(); } } // If the current interval is within the given accuracies, we // are satisfied with the current approximation. if (FastMath.abs(x1 - x0) < FastMath.max(rtol * FastMath.abs(x1), atol)) { switch (allowed) { case ANY_SIDE: return x1; case LEFT_SIDE: return inverted ? x1 : x0; case RIGHT_SIDE: return inverted ? x0 : x1; case BELOW_SIDE: return (f1 <= 0) ? x1 : x0; case ABOVE_SIDE: return (f1 >= 0) ? x1 : x0; default: throw new MathInternalError(); } } } }
protected final double doSolve ( ) { double x0 = getMin ( ) ; double x1 = getMax ( ) ; double f0 = computeObjectiveValue ( x0 ) ; double f1 = computeObjectiveValue ( x1 ) ; if ( f0 == 0.0 ) { return x0 ; } if ( f1 == 0.0 ) { return x1 ; } verifyBracketing ( x0 , x1 ) ; final double ftol = getFunctionValueAccuracy ( ) ; final double atol = getAbsoluteAccuracy ( ) ; final double rtol = getRelativeAccuracy ( ) ; boolean inverted = false ; while ( true ) { final double x = x1 - ( ( f1 * ( x1 - x0 ) ) / ( f1 - f0 ) ) ; final double fx = computeObjectiveValue ( x ) ; if ( fx == 0.0 ) { return x ; } if ( f1 * fx < 0 ) { x0 = x1 ; f0 = f1 ; inverted = ! inverted ; } else { switch ( method ) { case ILLINOIS : f0 *= 0.5 ; break ; case PEGASUS : f0 *= f1 / ( f1 + fx ) ; break ; case REGULA_FALSI : if ( x == x1 ) { final double delta = FastMath . max ( rtol * FastMath . abs ( x1 ) , atol ) ; x0 = 0.5 * ( x0 + x1 - delta ) ; f0 = computeObjectiveValue ( x0 ) ; } break ; default : throw new MathInternalError ( ) ; } } x1 = x ; f1 = fx ; if ( FastMath . abs ( f1 ) <= ftol ) { switch ( allowed ) { case ANY_SIDE : return x1 ; case LEFT_SIDE : if ( inverted ) { return x1 ; } break ; case RIGHT_SIDE : if ( ! inverted ) { return x1 ; } break ; case BELOW_SIDE : if ( f1 <= 0 ) { return x1 ; } break ; case ABOVE_SIDE : if ( f1 >= 0 ) { return x1 ; } break ; default : throw new MathInternalError ( ) ; } } if ( FastMath . abs ( x1 - x0 ) < FastMath . max ( rtol * FastMath . abs ( x1 ) , atol ) ) { switch ( allowed ) { case ANY_SIDE : return x1 ; case LEFT_SIDE : return inverted ? x1 : x0 ; case RIGHT_SIDE : return inverted ? x0 : x1 ; case BELOW_SIDE : return ( f1 <= 0 ) ? x1 : x0 ; case ABOVE_SIDE : return ( f1 >= 0 ) ? x1 : x0 ; default : throw new MathInternalError ( ) ; } } } }
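The failure mode the new REGULA_FALSI branch guards against can be seen with a few lines of plain Java: on a convex function one bracket endpoint stays pinned, and the secant update eventually stops moving x1 within double precision while |f1| is still above the function-value accuracy, so the evaluation budget is exhausted. A self-contained sketch on the function from the report (hedged: textbook regula falsi, not the Commons Math class):

```java
public class RegulaFalsiStagnation {
    static double f(double x) { return Math.exp(x) - Math.pow(Math.PI, 3.0); }

    public static void main(String[] args) {
        double x0 = 1, f0 = f(x0);  // stays pinned once the bracket stops inverting
        double x1 = 10, f1 = f(x1);
        for (int i = 0; i < 100; i++) {
            double x = x1 - f1 * (x1 - x0) / (f1 - f0);
            double fx = f(x);
            if (f1 * fx < 0) { x0 = x1; f0 = f1; } // bracket inverted
            // else: plain regula falsi leaves x0/f0 untouched
            x1 = x; f1 = fx;
            System.out.printf("eval %3d: x1 = %.17f, f1 = %.3e%n", i, x1, f1);
            if (fx == 0.0) return;
        }
        // x1 creeps toward the root (about 3.43419) from one side only; the fixed
        // code detects the stalled update (x == x1) and shrinks the interval.
    }
}
```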
Codec
15
src/main/java/org/apache/commons/codec/language/Soundex.java
183
198
Bug in HW rule in Soundex
The Soundex algorithm says that if two characters that map to the same code are separated by H or W, the second one is not encoded. However, in the implementation (in Soundex.getMappingCode(), line 191), a character that is preceded by two characters that are either H or W is not encoded, regardless of what the last consonant was. Source: http://en.wikipedia.org/wiki/Soundex#American_Soundex
private char getMappingCode(final String str, final int index) { // map() throws IllegalArgumentException final char mappedChar = this.map(str.charAt(index)); // HW rule check if (index > 1 && mappedChar != '0') { final char hwChar = str.charAt(index - 1); if ('H' == hwChar || 'W' == hwChar) { final char preHWChar = str.charAt(index - 2); final char firstCode = this.map(preHWChar); if (firstCode == mappedChar || 'H' == preHWChar || 'W' == preHWChar) { return 0; } } } return mappedChar; }
private char getMappingCode ( final String str , final int index ) { final char mappedChar = this . map ( str . charAt ( index ) ) ; if ( index > 1 && mappedChar != '0' ) { final char hwChar = str . charAt ( index - 1 ) ; if ( 'H' == hwChar || 'W' == hwChar ) { final char preHWChar = str . charAt ( index - 2 ) ; final char firstCode = this . map ( preHWChar ) ; if ( firstCode == mappedChar || 'H' == preHWChar || 'W' == preHWChar ) { return 0 ; } } } return mappedChar ; }
private char getMappingCode(final String str, final int index) { // map() throws IllegalArgumentException final char mappedChar = this.map(str.charAt(index)); // HW rule check if (index > 1 && mappedChar != '0') { for (int i=index-1 ; i>=0 ; i--) { final char prevChar = str.charAt(i); if (this.map(prevChar)==mappedChar) { return 0; } if ('H'!=prevChar && 'W'!=prevChar) { break; } } } return mappedChar; }
private char getMappingCode ( final String str , final int index ) { final char mappedChar = this . map ( str . charAt ( index ) ) ; if ( index > 1 && mappedChar != '0' ) { for ( int i = index - 1 ; i >= 0 ; i -- ) { final char prevChar = str . charAt ( i ) ; if ( this . map ( prevChar ) == mappedChar ) { return 0 ; } if ( 'H' != prevChar && 'W' != prevChar ) { break ; } } } return mappedChar ; }
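The Wikipedia rule quoted in the report has a handy test vector, "Ashcraft": S and C both map to code 2 and are separated only by H, so the C is not encoded and the result is A261. A hedged usage sketch (assuming the default US English mapping):

```java
import org.apache.commons.codec.language.Soundex;

public class SoundexHwRuleDemo {
    public static void main(String[] args) {
        Soundex soundex = new Soundex();
        // A-s(2)-h(skipped)-c(2, same code as the consonant before the H run: dropped)
        // -r(6)-a-f(1)-t(truncated): "A261". The fixed loop walks back over any run
        // of H/W characters instead of inspecting exactly one position.
        System.out.println(soundex.encode("Ashcraft"));
    }
}
```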
JacksonCore
15
src/main/java/com/fasterxml/jackson/core/filter/FilteringParserDelegate.java
221
433
Make use of `_allowMultipleMatches` in `FilteringParserDelegate`
Currently, it looks like the _allowMultipleMatches attribute in FilteringGeneratorDelegate is not utilised (i.e. no value is assigned to this variable). Per the documentation, this attribute offers some useful functionality, so it would be nice if it could be implemented properly. See https://groups.google.com/d/msg/jackson-user/VzZ94G9hvrs/JGFozl6lCQAJ
@Override public JsonToken nextToken() throws IOException { //Check for _allowMultipleMatches - false and atleast there is one token - which is _currToken // check for no buffered context _exposedContext - null //If all the conditions matches then check for scalar / non-scalar property //if not scalar and ended successfully, then return null //else if scalar, and scalar not present in obj/array and !includePath and INCLUDE_ALL matched once // then return null // Anything buffered? TokenFilterContext ctxt = _exposedContext; if (ctxt != null) { while (true) { JsonToken t = ctxt.nextTokenToRead(); if (t != null) { _currToken = t; return t; } // all done with buffered stuff? if (ctxt == _headContext) { _exposedContext = null; if (ctxt.inArray()) { t = delegate.getCurrentToken(); // Is this guaranteed to work without further checks? // if (t != JsonToken.START_ARRAY) { _currToken = t; return t; } // Almost! Most likely still have the current token; // with the sole exception of /* t = delegate.getCurrentToken(); if (t != JsonToken.FIELD_NAME) { _currToken = t; return t; } */ break; } // If not, traverse down the context chain ctxt = _headContext.findChildOf(ctxt); _exposedContext = ctxt; if (ctxt == null) { // should never occur throw _constructError("Unexpected problem: chain of filtered context broken"); } } } // If not, need to read more. If we got any: JsonToken t = delegate.nextToken(); if (t == null) { // no strict need to close, since we have no state here return (_currToken = t); } // otherwise... to include or not? TokenFilter f; switch (t.id()) { case ID_START_ARRAY: f = _itemFilter; if (f == TokenFilter.INCLUDE_ALL) { _headContext = _headContext.createChildArrayContext(f, true); return (_currToken = t); } if (f == null) { // does this occur? delegate.skipChildren(); break; } // Otherwise still iffy, need to check f = _headContext.checkValue(f); if (f == null) { delegate.skipChildren(); break; } if (f != TokenFilter.INCLUDE_ALL) { f = f.filterStartArray(); } _itemFilter = f; if (f == TokenFilter.INCLUDE_ALL) { _headContext = _headContext.createChildArrayContext(f, true); return (_currToken = t); } _headContext = _headContext.createChildArrayContext(f, false); // Also: only need buffering if parent path to be included if (_includePath) { t = _nextTokenWithBuffering(_headContext); if (t != null) { _currToken = t; return t; } } break; case ID_START_OBJECT: f = _itemFilter; if (f == TokenFilter.INCLUDE_ALL) { _headContext = _headContext.createChildObjectContext(f, true); return (_currToken = t); } if (f == null) { // does this occur? delegate.skipChildren(); break; } // Otherwise still iffy, need to check f = _headContext.checkValue(f); if (f == null) { delegate.skipChildren(); break; } if (f != TokenFilter.INCLUDE_ALL) { f = f.filterStartObject(); } _itemFilter = f; if (f == TokenFilter.INCLUDE_ALL) { _headContext = _headContext.createChildObjectContext(f, true); return (_currToken = t); } _headContext = _headContext.createChildObjectContext(f, false); // Also: only need buffering if parent path to be included if (_includePath) { t = _nextTokenWithBuffering(_headContext); if (t != null) { _currToken = t; return t; } } // note: inclusion of surrounding Object handled separately via // FIELD_NAME break; case ID_END_ARRAY: case ID_END_OBJECT: { boolean returnEnd = _headContext.isStartHandled(); f = _headContext.getFilter(); if ((f != null) && (f != TokenFilter.INCLUDE_ALL)) { f.filterFinishArray(); } _headContext = _headContext.getParent(); _itemFilter = _headContext.getFilter(); if (returnEnd) { return (_currToken = t); } } break; case ID_FIELD_NAME: { final String name = delegate.getCurrentName(); // note: this will also set 'needToHandleName' f = _headContext.setFieldName(name); if (f == TokenFilter.INCLUDE_ALL) { _itemFilter = f; if (!_includePath) { // Minor twist here: if parent NOT included, may need to induce output of // surrounding START_OBJECT/END_OBJECT if (_includeImmediateParent && !_headContext.isStartHandled()) { t = _headContext.nextTokenToRead(); // returns START_OBJECT but also marks it handled _exposedContext = _headContext; } } return (_currToken = t); } if (f == null) { delegate.nextToken(); delegate.skipChildren(); break; } f = f.includeProperty(name); if (f == null) { delegate.nextToken(); delegate.skipChildren(); break; } _itemFilter = f; if (f == TokenFilter.INCLUDE_ALL) { if (_includePath) { return (_currToken = t); } } if (_includePath) { t = _nextTokenWithBuffering(_headContext); if (t != null) { _currToken = t; return t; } } break; } default: // scalar value f = _itemFilter; if (f == TokenFilter.INCLUDE_ALL) { return (_currToken = t); } if (f != null) { f = _headContext.checkValue(f); if ((f == TokenFilter.INCLUDE_ALL) || ((f != null) && f.includeValue(delegate))) { return (_currToken = t); } } // Otherwise not included (leaves must be explicitly included) break; } // We get here if token was not yet found; offlined handling return _nextToken2(); }
@ Override public JsonToken nextToken ( ) throws IOException { TokenFilterContext ctxt = _exposedContext ; if ( ctxt != null ) { while ( true ) { JsonToken t = ctxt . nextTokenToRead ( ) ; if ( t != null ) { _currToken = t ; return t ; } if ( ctxt == _headContext ) { _exposedContext = null ; if ( ctxt . inArray ( ) ) { t = delegate . getCurrentToken ( ) ; _currToken = t ; return t ; } break ; } ctxt = _headContext . findChildOf ( ctxt ) ; _exposedContext = ctxt ; if ( ctxt == null ) { throw _constructError ( "Unexpected problem: chain of filtered context broken" ) ; } } } JsonToken t = delegate . nextToken ( ) ; if ( t == null ) { return ( _currToken = t ) ; } TokenFilter f ; switch ( t . id ( ) ) { case ID_START_ARRAY : f = _itemFilter ; if ( f == TokenFilter . INCLUDE_ALL ) { _headContext = _headContext . createChildArrayContext ( f , true ) ; return ( _currToken = t ) ; } if ( f == null ) { delegate . skipChildren ( ) ; break ; } f = _headContext . checkValue ( f ) ; if ( f == null ) { delegate . skipChildren ( ) ; break ; } if ( f != TokenFilter . INCLUDE_ALL ) { f = f . filterStartArray ( ) ; } _itemFilter = f ; if ( f == TokenFilter . INCLUDE_ALL ) { _headContext = _headContext . createChildArrayContext ( f , true ) ; return ( _currToken = t ) ; } _headContext = _headContext . createChildArrayContext ( f , false ) ; if ( _includePath ) { t = _nextTokenWithBuffering ( _headContext ) ; if ( t != null ) { _currToken = t ; return t ; } } break ; case ID_START_OBJECT : f = _itemFilter ; if ( f == TokenFilter . INCLUDE_ALL ) { _headContext = _headContext . createChildObjectContext ( f , true ) ; return ( _currToken = t ) ; } if ( f == null ) { delegate . skipChildren ( ) ; break ; } f = _headContext . checkValue ( f ) ; if ( f == null ) { delegate . skipChildren ( ) ; break ; } if ( f != TokenFilter . INCLUDE_ALL ) { f = f . filterStartObject ( ) ; } _itemFilter = f ; if ( f == TokenFilter . INCLUDE_ALL ) { _headContext = _headContext . createChildObjectContext ( f , true ) ; return ( _currToken = t ) ; } _headContext = _headContext . createChildObjectContext ( f , false ) ; if ( _includePath ) { t = _nextTokenWithBuffering ( _headContext ) ; if ( t != null ) { _currToken = t ; return t ; } } break ; case ID_END_ARRAY : case ID_END_OBJECT : { boolean returnEnd = _headContext . isStartHandled ( ) ; f = _headContext . getFilter ( ) ; if ( ( f != null ) && ( f != TokenFilter . INCLUDE_ALL ) ) { f . filterFinishArray ( ) ; } _headContext = _headContext . getParent ( ) ; _itemFilter = _headContext . getFilter ( ) ; if ( returnEnd ) { return ( _currToken = t ) ; } } break ; case ID_FIELD_NAME : { final String name = delegate . getCurrentName ( ) ; f = _headContext . setFieldName ( name ) ; if ( f == TokenFilter . INCLUDE_ALL ) { _itemFilter = f ; if ( ! _includePath ) { if ( _includeImmediateParent && ! _headContext . isStartHandled ( ) ) { t = _headContext . nextTokenToRead ( ) ; _exposedContext = _headContext ; } } return ( _currToken = t ) ; } if ( f == null ) { delegate . nextToken ( ) ; delegate . skipChildren ( ) ; break ; } f = f . includeProperty ( name ) ; if ( f == null ) { delegate . nextToken ( ) ; delegate . skipChildren ( ) ; break ; } _itemFilter = f ; if ( f == TokenFilter . INCLUDE_ALL ) { if ( _includePath ) { return ( _currToken = t ) ; } } if ( _includePath ) { t = _nextTokenWithBuffering ( _headContext ) ; if ( t != null ) { _currToken = t ; return t ; } } break ; } default : f = _itemFilter ; if ( f == TokenFilter . INCLUDE_ALL ) { return ( _currToken = t ) ; } if ( f != null ) { f = _headContext . checkValue ( f ) ; if ( ( f == TokenFilter . INCLUDE_ALL ) || ( ( f != null ) && f . includeValue ( delegate ) ) ) { return ( _currToken = t ) ; } } break ; } return _nextToken2 ( ) ; }
@Override public JsonToken nextToken() throws IOException { //Check for _allowMultipleMatches - false and atleast there is one token - which is _currToken // check for no buffered context _exposedContext - null //If all the conditions matches then check for scalar / non-scalar property if(!_allowMultipleMatches && _currToken != null && _exposedContext == null){ //if not scalar and ended successfully, then return null if((_currToken.isStructEnd() && _headContext.isStartHandled()) ){ return (_currToken = null); } //else if scalar, and scalar not present in obj/array and !includePath and INCLUDE_ALL matched once // then return null else if(_currToken.isScalarValue() && !_headContext.isStartHandled() && !_includePath && _itemFilter == TokenFilter.INCLUDE_ALL) { return (_currToken = null); } } // Anything buffered? TokenFilterContext ctxt = _exposedContext; if (ctxt != null) { while (true) { JsonToken t = ctxt.nextTokenToRead(); if (t != null) { _currToken = t; return t; } // all done with buffered stuff? if (ctxt == _headContext) { _exposedContext = null; if (ctxt.inArray()) { t = delegate.getCurrentToken(); // Is this guaranteed to work without further checks? // if (t != JsonToken.START_ARRAY) { _currToken = t; return t; } // Almost! Most likely still have the current token; // with the sole exception of /* t = delegate.getCurrentToken(); if (t != JsonToken.FIELD_NAME) { _currToken = t; return t; } */ break; } // If not, traverse down the context chain ctxt = _headContext.findChildOf(ctxt); _exposedContext = ctxt; if (ctxt == null) { // should never occur throw _constructError("Unexpected problem: chain of filtered context broken"); } } } // If not, need to read more. If we got any: JsonToken t = delegate.nextToken(); if (t == null) { // no strict need to close, since we have no state here return (_currToken = t); } // otherwise... to include or not? TokenFilter f; switch (t.id()) { case ID_START_ARRAY: f = _itemFilter; if (f == TokenFilter.INCLUDE_ALL) { _headContext = _headContext.createChildArrayContext(f, true); return (_currToken = t); } if (f == null) { // does this occur? delegate.skipChildren(); break; } // Otherwise still iffy, need to check f = _headContext.checkValue(f); if (f == null) { delegate.skipChildren(); break; } if (f != TokenFilter.INCLUDE_ALL) { f = f.filterStartArray(); } _itemFilter = f; if (f == TokenFilter.INCLUDE_ALL) { _headContext = _headContext.createChildArrayContext(f, true); return (_currToken = t); } _headContext = _headContext.createChildArrayContext(f, false); // Also: only need buffering if parent path to be included if (_includePath) { t = _nextTokenWithBuffering(_headContext); if (t != null) { _currToken = t; return t; } } break; case ID_START_OBJECT: f = _itemFilter; if (f == TokenFilter.INCLUDE_ALL) { _headContext = _headContext.createChildObjectContext(f, true); return (_currToken = t); } if (f == null) { // does this occur? delegate.skipChildren(); break; } // Otherwise still iffy, need to check f = _headContext.checkValue(f); if (f == null) { delegate.skipChildren(); break; } if (f != TokenFilter.INCLUDE_ALL) { f = f.filterStartObject(); } _itemFilter = f; if (f == TokenFilter.INCLUDE_ALL) { _headContext = _headContext.createChildObjectContext(f, true); return (_currToken = t); } _headContext = _headContext.createChildObjectContext(f, false); // Also: only need buffering if parent path to be included if (_includePath) { t = _nextTokenWithBuffering(_headContext); if (t != null) { _currToken = t; return t; } } // note: inclusion of surrounding Object handled separately via // FIELD_NAME break; case ID_END_ARRAY: case ID_END_OBJECT: { boolean returnEnd = _headContext.isStartHandled(); f = _headContext.getFilter(); if ((f != null) && (f != TokenFilter.INCLUDE_ALL)) { f.filterFinishArray(); } _headContext = _headContext.getParent(); _itemFilter = _headContext.getFilter(); if (returnEnd) { return (_currToken = t); } } break; case ID_FIELD_NAME: { final String name = delegate.getCurrentName(); // note: this will also set 'needToHandleName' f = _headContext.setFieldName(name); if (f == TokenFilter.INCLUDE_ALL) { _itemFilter = f; if (!_includePath) { // Minor twist here: if parent NOT included, may need to induce output of // surrounding START_OBJECT/END_OBJECT if (_includeImmediateParent && !_headContext.isStartHandled()) { t = _headContext.nextTokenToRead(); // returns START_OBJECT but also marks it handled _exposedContext = _headContext; } } return (_currToken = t); } if (f == null) { delegate.nextToken(); delegate.skipChildren(); break; } f = f.includeProperty(name); if (f == null) { delegate.nextToken(); delegate.skipChildren(); break; } _itemFilter = f; if (f == TokenFilter.INCLUDE_ALL) { if (_includePath) { return (_currToken = t); } } if (_includePath) { t = _nextTokenWithBuffering(_headContext); if (t != null) { _currToken = t; return t; } } break; } default: // scalar value f = _itemFilter; if (f == TokenFilter.INCLUDE_ALL) { return (_currToken = t); } if (f != null) { f = _headContext.checkValue(f); if ((f == TokenFilter.INCLUDE_ALL) || ((f != null) && f.includeValue(delegate))) { return (_currToken = t); } } // Otherwise not included (leaves must be explicitly included) break; } // We get here if token was not yet found; offlined handling return _nextToken2(); }
@ Override public JsonToken nextToken ( ) throws IOException { if ( ! _allowMultipleMatches && _currToken != null && _exposedContext == null ) { if ( ( _currToken . isStructEnd ( ) && _headContext . isStartHandled ( ) ) ) { return ( _currToken = null ) ; } else if ( _currToken . isScalarValue ( ) && ! _headContext . isStartHandled ( ) && ! _includePath && _itemFilter == TokenFilter . INCLUDE_ALL ) { return ( _currToken = null ) ; } } TokenFilterContext ctxt = _exposedContext ; if ( ctxt != null ) { while ( true ) { JsonToken t = ctxt . nextTokenToRead ( ) ; if ( t != null ) { _currToken = t ; return t ; } if ( ctxt == _headContext ) { _exposedContext = null ; if ( ctxt . inArray ( ) ) { t = delegate . getCurrentToken ( ) ; _currToken = t ; return t ; } break ; } ctxt = _headContext . findChildOf ( ctxt ) ; _exposedContext = ctxt ; if ( ctxt == null ) { throw _constructError ( "Unexpected problem: chain of filtered context broken" ) ; } } } JsonToken t = delegate . nextToken ( ) ; if ( t == null ) { return ( _currToken = t ) ; } TokenFilter f ; switch ( t . id ( ) ) { case ID_START_ARRAY : f = _itemFilter ; if ( f == TokenFilter . INCLUDE_ALL ) { _headContext = _headContext . createChildArrayContext ( f , true ) ; return ( _currToken = t ) ; } if ( f == null ) { delegate . skipChildren ( ) ; break ; } f = _headContext . checkValue ( f ) ; if ( f == null ) { delegate . skipChildren ( ) ; break ; } if ( f != TokenFilter . INCLUDE_ALL ) { f = f . filterStartArray ( ) ; } _itemFilter = f ; if ( f == TokenFilter . INCLUDE_ALL ) { _headContext = _headContext . createChildArrayContext ( f , true ) ; return ( _currToken = t ) ; } _headContext = _headContext . createChildArrayContext ( f , false ) ; if ( _includePath ) { t = _nextTokenWithBuffering ( _headContext ) ; if ( t != null ) { _currToken = t ; return t ; } } break ; case ID_START_OBJECT : f = _itemFilter ; if ( f == TokenFilter . INCLUDE_ALL ) { _headContext = _headContext . createChildObjectContext ( f , true ) ; return ( _currToken = t ) ; } if ( f == null ) { delegate . skipChildren ( ) ; break ; } f = _headContext . checkValue ( f ) ; if ( f == null ) { delegate . skipChildren ( ) ; break ; } if ( f != TokenFilter . INCLUDE_ALL ) { f = f . filterStartObject ( ) ; } _itemFilter = f ; if ( f == TokenFilter . INCLUDE_ALL ) { _headContext = _headContext . createChildObjectContext ( f , true ) ; return ( _currToken = t ) ; } _headContext = _headContext . createChildObjectContext ( f , false ) ; if ( _includePath ) { t = _nextTokenWithBuffering ( _headContext ) ; if ( t != null ) { _currToken = t ; return t ; } } break ; case ID_END_ARRAY : case ID_END_OBJECT : { boolean returnEnd = _headContext . isStartHandled ( ) ; f = _headContext . getFilter ( ) ; if ( ( f != null ) && ( f != TokenFilter . INCLUDE_ALL ) ) { f . filterFinishArray ( ) ; } _headContext = _headContext . getParent ( ) ; _itemFilter = _headContext . getFilter ( ) ; if ( returnEnd ) { return ( _currToken = t ) ; } } break ; case ID_FIELD_NAME : { final String name = delegate . getCurrentName ( ) ; f = _headContext . setFieldName ( name ) ; if ( f == TokenFilter . INCLUDE_ALL ) { _itemFilter = f ; if ( ! _includePath ) { if ( _includeImmediateParent && ! _headContext . isStartHandled ( ) ) { t = _headContext . nextTokenToRead ( ) ; _exposedContext = _headContext ; } } return ( _currToken = t ) ; } if ( f == null ) { delegate . nextToken ( ) ; delegate . skipChildren ( ) ; break ; } f = f . includeProperty ( name ) ; if ( f == null ) { delegate . nextToken ( ) ; delegate . skipChildren ( ) ; break ; } _itemFilter = f ; if ( f == TokenFilter . INCLUDE_ALL ) { if ( _includePath ) { return ( _currToken = t ) ; } } if ( _includePath ) { t = _nextTokenWithBuffering ( _headContext ) ; if ( t != null ) { _currToken = t ; return t ; } } break ; } default : f = _itemFilter ; if ( f == TokenFilter . INCLUDE_ALL ) { return ( _currToken = t ) ; } if ( f != null ) { f = _headContext . checkValue ( f ) ; if ( ( f == TokenFilter . INCLUDE_ALL ) || ( ( f != null ) && f . includeValue ( delegate ) ) ) { return ( _currToken = t ) ; } } break ; } return _nextToken2 ( ) ; }
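For context, a hedged sketch of how the flag is meant to behave from the caller's side (assuming jackson-core's four-argument FilteringParserDelegate constructor; the filter below is illustrative, modeled on the TokenFilter extension points):

```java
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
import com.fasterxml.jackson.core.filter.FilteringParserDelegate;
import com.fasterxml.jackson.core.filter.TokenFilter;

public class SingleMatchDemo {
    public static void main(String[] args) throws Exception {
        JsonParser base = new JsonFactory()
                .createParser("{\"value\":1,\"nested\":{\"value\":2}}");
        // Include any property named "value", exclude all other scalars.
        TokenFilter matchValue = new TokenFilter() {
            @Override
            public TokenFilter includeProperty(String name) {
                return "value".equals(name) ? TokenFilter.INCLUDE_ALL : this;
            }
            @Override
            protected boolean _includeScalar() { return false; }
        };
        // includePath=false, allowMultipleMatches=false: with the patch above,
        // nextToken() should return null right after the first matching scalar.
        JsonParser filtered = new FilteringParserDelegate(base, matchValue, false, false);
        for (JsonToken t = filtered.nextToken(); t != null; t = filtered.nextToken()) {
            System.out.println(t + " " + filtered.getText());
        }
    }
}
```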
JacksonDatabind
34
src/main/java/com/fasterxml/jackson/databind/ser/std/NumberSerializer.java
73
87
Regression in 2.7.0-rc2, for schema/introspection for `BigDecimal`
(found via Avro module, but surprisingly the json schema module has no test to catch it) Looks like the schema type for `BigDecimal` is not correctly produced, due to an error in refactoring (made to simplify introspection for simple serializers): it is seen as `BigInteger` (and for Avro, for example, results in `long` getting written).
@Override public void acceptJsonFormatVisitor(JsonFormatVisitorWrapper visitor, JavaType typeHint) throws JsonMappingException { if (_isInt) { visitIntFormat(visitor, typeHint, JsonParser.NumberType.BIG_INTEGER); } else { Class<?> h = handledType(); if (h == BigDecimal.class) { visitFloatFormat(visitor, typeHint, JsonParser.NumberType.BIG_INTEGER); } else { // otherwise bit unclear what to call... but let's try: /*JsonNumberFormatVisitor v2 =*/ visitor.expectNumberFormat(typeHint); } } }
@ Override public void acceptJsonFormatVisitor ( JsonFormatVisitorWrapper visitor , JavaType typeHint ) throws JsonMappingException { if ( _isInt ) { visitIntFormat ( visitor , typeHint , JsonParser . NumberType . BIG_INTEGER ) ; } else { Class < ? > h = handledType ( ) ; if ( h == BigDecimal . class ) { visitFloatFormat ( visitor , typeHint , JsonParser . NumberType . BIG_INTEGER ) ; } else { visitor . expectNumberFormat ( typeHint ) ; } } }
@Override public void acceptJsonFormatVisitor(JsonFormatVisitorWrapper visitor, JavaType typeHint) throws JsonMappingException { if (_isInt) { visitIntFormat(visitor, typeHint, JsonParser.NumberType.BIG_INTEGER); } else { Class<?> h = handledType(); if (h == BigDecimal.class) { visitFloatFormat(visitor, typeHint, JsonParser.NumberType.BIG_DECIMAL); } else { // otherwise bit unclear what to call... but let's try: /*JsonNumberFormatVisitor v2 =*/ visitor.expectNumberFormat(typeHint); } } }
@ Override public void acceptJsonFormatVisitor ( JsonFormatVisitorWrapper visitor , JavaType typeHint ) throws JsonMappingException { if ( _isInt ) { visitIntFormat ( visitor , typeHint , JsonParser . NumberType . BIG_INTEGER ) ; } else { Class < ? > h = handledType ( ) ; if ( h == BigDecimal . class ) { visitFloatFormat ( visitor , typeHint , JsonParser . NumberType . BIG_DECIMAL ) ; } else { visitor . expectNumberFormat ( typeHint ) ; } } }
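The regression can be probed without any schema module via the format-visitor callbacks; a hedged sketch (assuming the JsonFormatVisitorWrapper.Base and JsonNumberFormatVisitor.Base helper classes in jackson-databind):

```java
import java.math.BigDecimal;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.JavaType;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.jsonFormatVisitors.JsonFormatVisitorWrapper;
import com.fasterxml.jackson.databind.jsonFormatVisitors.JsonNumberFormatVisitor;

public class BigDecimalSchemaProbe {
    public static void main(String[] args) throws Exception {
        new ObjectMapper().acceptJsonFormatVisitor(BigDecimal.class,
                new JsonFormatVisitorWrapper.Base() {
            @Override
            public JsonNumberFormatVisitor expectNumberFormat(JavaType type) {
                return new JsonNumberFormatVisitor.Base() {
                    @Override
                    public void numberType(JsonParser.NumberType numberType) {
                        // 2.7.0-rc2 reported BIG_INTEGER here; the fix reports BIG_DECIMAL.
                        System.out.println("numberType = " + numberType);
                    }
                };
            }
        });
    }
}
```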
JacksonDatabind
8
src/main/java/com/fasterxml/jackson/databind/deser/impl/CreatorCollector.java
276
308
Problem with bogus conflict between single-arg-String vs `CharSequence` constructor
Although it is a good idea to allow recognizing `CharSequence` almost as an alias for `String`, this can cause problems for classes like `StringBuilder` that have separate constructors for both. This actually throws a bogus exception on 2.5.0, due to the introduction of the ability to recognize `CharSequence`.
protected void verifyNonDup(AnnotatedWithParams newOne, int typeIndex, boolean explicit) { final int mask = (1 << typeIndex); _hasNonDefaultCreator = true; AnnotatedWithParams oldOne = _creators[typeIndex]; // already had an explicitly marked one? if (oldOne != null) { if ((_explicitCreators & mask) != 0) { // already had explicitly annotated, leave as-is // but skip, if new one not annotated if (!explicit) { return; } // both explicit: verify // otherwise only verify if neither explicitly annotated. } // one more thing: ok to override in sub-class if (oldOne.getClass() == newOne.getClass()) { // [databind#667]: avoid one particular class of bogus problems throw new IllegalArgumentException("Conflicting "+TYPE_DESCS[typeIndex] +" creators: already had explicitly marked "+oldOne+", encountered "+newOne); // otherwise, which one to choose? // new type more generic, use old // new type more specific, use it } } if (explicit) { _explicitCreators |= mask; } _creators[typeIndex] = _fixAccess(newOne); }
protected void verifyNonDup ( AnnotatedWithParams newOne , int typeIndex , boolean explicit ) { final int mask = ( 1 << typeIndex ) ; _hasNonDefaultCreator = true ; AnnotatedWithParams oldOne = _creators [ typeIndex ] ; if ( oldOne != null ) { if ( ( _explicitCreators & mask ) != 0 ) { if ( ! explicit ) { return ; } } if ( oldOne . getClass ( ) == newOne . getClass ( ) ) { throw new IllegalArgumentException ( "Conflicting " + TYPE_DESCS [ typeIndex ] + " creators: already had explicitly marked " + oldOne + ", encountered " + newOne ) ; } } if ( explicit ) { _explicitCreators |= mask ; } _creators [ typeIndex ] = _fixAccess ( newOne ) ; }
protected void verifyNonDup(AnnotatedWithParams newOne, int typeIndex, boolean explicit) { final int mask = (1 << typeIndex); _hasNonDefaultCreator = true; AnnotatedWithParams oldOne = _creators[typeIndex]; // already had an explicitly marked one? if (oldOne != null) { boolean verify; if ((_explicitCreators & mask) != 0) { // already had explicitly annotated, leave as-is // but skip, if new one not annotated if (!explicit) { return; } // both explicit: verify verify = true; } else { // otherwise only verify if neither explicitly annotated. verify = !explicit; } // one more thing: ok to override in sub-class if (verify && (oldOne.getClass() == newOne.getClass())) { // [databind#667]: avoid one particular class of bogus problems Class<?> oldType = oldOne.getRawParameterType(0); Class<?> newType = newOne.getRawParameterType(0); if (oldType == newType) { throw new IllegalArgumentException("Conflicting "+TYPE_DESCS[typeIndex] +" creators: already had explicitly marked "+oldOne+", encountered "+newOne); } // otherwise, which one to choose? if (newType.isAssignableFrom(oldType)) { // new type more generic, use old return; } // new type more specific, use it } } if (explicit) { _explicitCreators |= mask; } _creators[typeIndex] = _fixAccess(newOne); }
protected void verifyNonDup ( AnnotatedWithParams newOne , int typeIndex , boolean explicit ) { final int mask = ( 1 << typeIndex ) ; _hasNonDefaultCreator = true ; AnnotatedWithParams oldOne = _creators [ typeIndex ] ; if ( oldOne != null ) { boolean verify ; if ( ( _explicitCreators & mask ) != 0 ) { if ( ! explicit ) { return ; } verify = true ; } else { verify = ! explicit ; } if ( verify && ( oldOne . getClass ( ) == newOne . getClass ( ) ) ) { Class < ? > oldType = oldOne . getRawParameterType ( 0 ) ; Class < ? > newType = newOne . getRawParameterType ( 0 ) ; if ( oldType == newType ) { throw new IllegalArgumentException ( "Conflicting " + TYPE_DESCS [ typeIndex ] + " creators: already had explicitly marked " + oldOne + ", encountered " + newOne ) ; } if ( newType . isAssignableFrom ( oldType ) ) { return ; } } } if ( explicit ) { _explicitCreators |= mask ; } _creators [ typeIndex ] = _fixAccess ( newOne ) ; }
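The class named in the report doubles as a smoke test, since StringBuilder has both StringBuilder(String) and StringBuilder(CharSequence) single-argument constructors; a hedged sketch:

```java
import com.fasterxml.jackson.databind.ObjectMapper;

public class StringBuilderCreatorRepro {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // On 2.5.0 this threw IllegalArgumentException ("Conflicting String creators")
        // because the String and CharSequence constructors collided; with the fix,
        // the more specific String constructor wins and "abc" is printed.
        StringBuilder sb = mapper.readValue("\"abc\"", StringBuilder.class);
        System.out.println(sb);
    }
}
```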
Compress
25
src/main/java/org/apache/commons/compress/archivers/zip/ZipArchiveInputStream.java
174
184
ZIP reads correctly with commons-compress 1.6, gives NUL bytes in 1.7
When running the code below, commons-compress 1.6 writes: Content of test.txt: data By comparison, commons-compress 1.7 writes Content of test.txt: ^@^@^@^@^@ package com.example.jrn; import org.apache.commons.compress.archivers.zip.ZipArchiveEntry; import org.apache.commons.compress.archivers.zip.ZipArchiveInputStream; import java.io.ByteArrayInputStream; import java.io.IOException; import java.lang.System; /** * Hello world! * */ public class App { public static void main(String[] args) { byte[] zip = { (byte)0x50, (byte)0x4b, (byte)0x03, (byte)0x04, (byte)0x0a, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x03, (byte)0x7b, (byte)0xd1, (byte)0x42, (byte)0x82, (byte)0xc5, (byte)0xc1, (byte)0xe6, (byte)0x05, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x05, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x08, (byte)0x00, (byte)0x1c, (byte)0x00, (byte)0x74, (byte)0x65, (byte)0x73, (byte)0x74, (byte)0x2e, (byte)0x74, (byte)0x78, (byte)0x74, (byte)0x55, (byte)0x54, (byte)0x09, (byte)0x00, (byte)0x03, (byte)0x56, (byte)0x62, (byte)0xbf, (byte)0x51, (byte)0x2a, (byte)0x63, (byte)0xbf, (byte)0x51, (byte)0x75, (byte)0x78, (byte)0x0b, (byte)0x00, (byte)0x01, (byte)0x04, (byte)0x01, (byte)0xff, (byte)0x01, (byte)0x00, (byte)0x04, (byte)0x88, (byte)0x13, (byte)0x00, (byte)0x00, (byte)0x64, (byte)0x61, (byte)0x74, (byte)0x61, (byte)0x0a, (byte)0x50, (byte)0x4b, (byte)0x01, (byte)0x02, (byte)0x1e, (byte)0x03, (byte)0x0a, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x03, (byte)0x7b, (byte)0xd1, (byte)0x42, (byte)0x82, (byte)0xc5, (byte)0xc1, (byte)0xe6, (byte)0x05, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x05, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x08, (byte)0x00, (byte)0x18, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x01, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0xa0, (byte)0x81, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x74, (byte)0x65, (byte)0x73, (byte)0x74, (byte)0x2e, (byte)0x74, (byte)0x78, (byte)0x74, (byte)0x55, (byte)0x54, (byte)0x05, (byte)0x00, (byte)0x03, (byte)0x56, (byte)0x62, (byte)0xbf, (byte)0x51, (byte)0x75, (byte)0x78, (byte)0x0b, (byte)0x00, (byte)0x01, (byte)0x04, (byte)0x01, (byte)0xff, (byte)0x01, (byte)0x00, (byte)0x04, (byte)0x88, (byte)0x13, (byte)0x00, (byte)0x00, (byte)0x50, (byte)0x4b, (byte)0x05, (byte)0x06, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x01, (byte)0x00, (byte)0x01, (byte)0x00, (byte)0x4e, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x47, (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x00, (byte)00 }; ByteArrayInputStream bin = new ByteArrayInputStream(zip); try { ZipArchiveInputStream in = new ZipArchiveInputStream(bin); try { while (true) { ZipArchiveEntry entry = in.getNextZipEntry(); if (entry == null) { break; } byte[] buf = new byte[(int) entry.getSize()]; in.read(buf); System.out.println("Content of " + entry.getName() + ":"); System.out.write(buf); } } finally { in.close(); } } catch (IOException e) { System.err.println("IOException: " + e); } } }
public ZipArchiveInputStream(InputStream inputStream, String encoding, boolean useUnicodeExtraFields, boolean allowStoredEntriesWithDataDescriptor) { zipEncoding = ZipEncodingHelper.getZipEncoding(encoding); this.useUnicodeExtraFields = useUnicodeExtraFields; in = new PushbackInputStream(inputStream, buf.capacity()); this.allowStoredEntriesWithDataDescriptor = allowStoredEntriesWithDataDescriptor; // haven't read anything so far }
public ZipArchiveInputStream ( InputStream inputStream , String encoding , boolean useUnicodeExtraFields , boolean allowStoredEntriesWithDataDescriptor ) { zipEncoding = ZipEncodingHelper . getZipEncoding ( encoding ) ; this . useUnicodeExtraFields = useUnicodeExtraFields ; in = new PushbackInputStream ( inputStream , buf . capacity ( ) ) ; this . allowStoredEntriesWithDataDescriptor = allowStoredEntriesWithDataDescriptor ; }
public ZipArchiveInputStream(InputStream inputStream, String encoding, boolean useUnicodeExtraFields, boolean allowStoredEntriesWithDataDescriptor) { zipEncoding = ZipEncodingHelper.getZipEncoding(encoding); this.useUnicodeExtraFields = useUnicodeExtraFields; in = new PushbackInputStream(inputStream, buf.capacity()); this.allowStoredEntriesWithDataDescriptor = allowStoredEntriesWithDataDescriptor; // haven't read anything so far buf.limit(0); }
public ZipArchiveInputStream ( InputStream inputStream , String encoding , boolean useUnicodeExtraFields , boolean allowStoredEntriesWithDataDescriptor ) { zipEncoding = ZipEncodingHelper . getZipEncoding ( encoding ) ; this . useUnicodeExtraFields = useUnicodeExtraFields ; in = new PushbackInputStream ( inputStream , buf . capacity ( ) ) ; this . allowStoredEntriesWithDataDescriptor = allowStoredEntriesWithDataDescriptor ; buf . limit ( 0 ) ; }
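The one-line fix rests on java.nio.ByteBuffer semantics: a freshly allocated buffer has position 0 and limit == capacity, so it reports a full buffer of readable (all-zero) bytes, which is where the NUL output in the report came from; limit(0) is the idiom for "empty, must fill first". A standalone illustration:

```java
import java.nio.ByteBuffer;

public class BufferLimitDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(512);
        System.out.println(buf.remaining()); // 512 -- looks like 512 readable zero bytes
        buf.limit(0);
        System.out.println(buf.remaining()); // 0 -- forces a refill before any read
    }
}
```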
JacksonXml
3
src/main/java/com/fasterxml/jackson/dataformat/xml/deser/FromXmlParser.java
600
693
FromXMLParser nextTextValue() incorrect for attributes
As of #129 the method nextTextValue of FromXMLParser will no longer return a value for attributes. As the _currToken is JsonToken.VALUE_STRING in this case, I think it is wrong to return null and it should return _currText.
@Override public String nextTextValue() throws IOException { _binaryValue = null; if (_nextToken != null) { JsonToken t = _nextToken; _currToken = t; _nextToken = null; // expected case; yes, got a String if (t == JsonToken.VALUE_STRING) { return _currText; } _updateState(t); return null; } int token = _xmlTokens.next(); // mostly copied from 'nextToken()' while (token == XmlTokenStream.XML_START_ELEMENT) { if (_mayBeLeaf) { _nextToken = JsonToken.FIELD_NAME; _parsingContext = _parsingContext.createChildObjectContext(-1, -1); _currToken = JsonToken.START_OBJECT; return null; } if (_parsingContext.inArray()) { token = _xmlTokens.next(); _mayBeLeaf = true; continue; } String name = _xmlTokens.getLocalName(); _parsingContext.setCurrentName(name); if (_namesToWrap != null && _namesToWrap.contains(name)) { _xmlTokens.repeatStartElement(); } _mayBeLeaf = true; _currToken = JsonToken.FIELD_NAME; return null; } // Ok; beyond start element, what do we get? switch (token) { case XmlTokenStream.XML_END_ELEMENT: if (_mayBeLeaf) { // NOTE: this is different from nextToken() -- produce "", NOT null _mayBeLeaf = false; _currToken = JsonToken.VALUE_STRING; return (_currText = ""); } _currToken = _parsingContext.inArray() ? JsonToken.END_ARRAY : JsonToken.END_OBJECT; _parsingContext = _parsingContext.getParent(); _namesToWrap = _parsingContext.getNamesToWrap(); break; case XmlTokenStream.XML_ATTRIBUTE_NAME: // If there was a chance of leaf node, no more... if (_mayBeLeaf) { _mayBeLeaf = false; _nextToken = JsonToken.FIELD_NAME; _currText = _xmlTokens.getText(); _parsingContext = _parsingContext.createChildObjectContext(-1, -1); _currToken = JsonToken.START_OBJECT; } else { _parsingContext.setCurrentName(_xmlTokens.getLocalName()); _currToken = JsonToken.FIELD_NAME; } break; case XmlTokenStream.XML_ATTRIBUTE_VALUE: _currText = _xmlTokens.getText(); _currToken = JsonToken.VALUE_STRING; break; case XmlTokenStream.XML_TEXT: _currText = _xmlTokens.getText(); if (_mayBeLeaf) { _mayBeLeaf = false; // Also: must skip following END_ELEMENT _xmlTokens.skipEndElement(); // NOTE: this is different from nextToken() -- NO work-around // for otherwise empty List/array _currToken = JsonToken.VALUE_STRING; return _currText; } // If not a leaf, need to transform into property... _parsingContext.setCurrentName(_cfgNameForTextElement); _nextToken = JsonToken.VALUE_STRING; _currToken = JsonToken.FIELD_NAME; break; case XmlTokenStream.XML_END: _currToken = null; } return null; }
@ Override public String nextTextValue ( ) throws IOException { _binaryValue = null ; if ( _nextToken != null ) { JsonToken t = _nextToken ; _currToken = t ; _nextToken = null ; if ( t == JsonToken . VALUE_STRING ) { return _currText ; } _updateState ( t ) ; return null ; } int token = _xmlTokens . next ( ) ; while ( token == XmlTokenStream . XML_START_ELEMENT ) { if ( _mayBeLeaf ) { _nextToken = JsonToken . FIELD_NAME ; _parsingContext = _parsingContext . createChildObjectContext ( - 1 , - 1 ) ; _currToken = JsonToken . START_OBJECT ; return null ; } if ( _parsingContext . inArray ( ) ) { token = _xmlTokens . next ( ) ; _mayBeLeaf = true ; continue ; } String name = _xmlTokens . getLocalName ( ) ; _parsingContext . setCurrentName ( name ) ; if ( _namesToWrap != null && _namesToWrap . contains ( name ) ) { _xmlTokens . repeatStartElement ( ) ; } _mayBeLeaf = true ; _currToken = JsonToken . FIELD_NAME ; return null ; } switch ( token ) { case XmlTokenStream . XML_END_ELEMENT : if ( _mayBeLeaf ) { _mayBeLeaf = false ; _currToken = JsonToken . VALUE_STRING ; return ( _currText = "" ) ; } _currToken = _parsingContext . inArray ( ) ? JsonToken . END_ARRAY : JsonToken . END_OBJECT ; _parsingContext = _parsingContext . getParent ( ) ; _namesToWrap = _parsingContext . getNamesToWrap ( ) ; break ; case XmlTokenStream . XML_ATTRIBUTE_NAME : if ( _mayBeLeaf ) { _mayBeLeaf = false ; _nextToken = JsonToken . FIELD_NAME ; _currText = _xmlTokens . getText ( ) ; _parsingContext = _parsingContext . createChildObjectContext ( - 1 , - 1 ) ; _currToken = JsonToken . START_OBJECT ; } else { _parsingContext . setCurrentName ( _xmlTokens . getLocalName ( ) ) ; _currToken = JsonToken . FIELD_NAME ; } break ; case XmlTokenStream . XML_ATTRIBUTE_VALUE : _currText = _xmlTokens . getText ( ) ; _currToken = JsonToken . VALUE_STRING ; break ; case XmlTokenStream . XML_TEXT : _currText = _xmlTokens . getText ( ) ; if ( _mayBeLeaf ) { _mayBeLeaf = false ; _xmlTokens . skipEndElement ( ) ; _currToken = JsonToken . VALUE_STRING ; return _currText ; } _parsingContext . setCurrentName ( _cfgNameForTextElement ) ; _nextToken = JsonToken . VALUE_STRING ; _currToken = JsonToken . FIELD_NAME ; break ; case XmlTokenStream . XML_END : _currToken = null ; } return null ; }
@Override public String nextTextValue() throws IOException { _binaryValue = null; if (_nextToken != null) { JsonToken t = _nextToken; _currToken = t; _nextToken = null; // expected case; yes, got a String if (t == JsonToken.VALUE_STRING) { return _currText; } _updateState(t); return null; } int token = _xmlTokens.next(); // mostly copied from 'nextToken()' while (token == XmlTokenStream.XML_START_ELEMENT) { if (_mayBeLeaf) { _nextToken = JsonToken.FIELD_NAME; _parsingContext = _parsingContext.createChildObjectContext(-1, -1); _currToken = JsonToken.START_OBJECT; return null; } if (_parsingContext.inArray()) { token = _xmlTokens.next(); _mayBeLeaf = true; continue; } String name = _xmlTokens.getLocalName(); _parsingContext.setCurrentName(name); if (_namesToWrap != null && _namesToWrap.contains(name)) { _xmlTokens.repeatStartElement(); } _mayBeLeaf = true; _currToken = JsonToken.FIELD_NAME; return null; } // Ok; beyond start element, what do we get? switch (token) { case XmlTokenStream.XML_END_ELEMENT: if (_mayBeLeaf) { // NOTE: this is different from nextToken() -- produce "", NOT null _mayBeLeaf = false; _currToken = JsonToken.VALUE_STRING; return (_currText = ""); } _currToken = _parsingContext.inArray() ? JsonToken.END_ARRAY : JsonToken.END_OBJECT; _parsingContext = _parsingContext.getParent(); _namesToWrap = _parsingContext.getNamesToWrap(); break; case XmlTokenStream.XML_ATTRIBUTE_NAME: // If there was a chance of leaf node, no more... if (_mayBeLeaf) { _mayBeLeaf = false; _nextToken = JsonToken.FIELD_NAME; _currText = _xmlTokens.getText(); _parsingContext = _parsingContext.createChildObjectContext(-1, -1); _currToken = JsonToken.START_OBJECT; } else { _parsingContext.setCurrentName(_xmlTokens.getLocalName()); _currToken = JsonToken.FIELD_NAME; } break; case XmlTokenStream.XML_ATTRIBUTE_VALUE: _currToken = JsonToken.VALUE_STRING; return (_currText = _xmlTokens.getText()); case XmlTokenStream.XML_TEXT: _currText = _xmlTokens.getText(); if (_mayBeLeaf) { _mayBeLeaf = false; // Also: must skip following END_ELEMENT _xmlTokens.skipEndElement(); // NOTE: this is different from nextToken() -- NO work-around // for otherwise empty List/array _currToken = JsonToken.VALUE_STRING; return _currText; } // If not a leaf, need to transform into property... _parsingContext.setCurrentName(_cfgNameForTextElement); _nextToken = JsonToken.VALUE_STRING; _currToken = JsonToken.FIELD_NAME; break; case XmlTokenStream.XML_END: _currToken = null; } return null; }
@ Override public String nextTextValue ( ) throws IOException { _binaryValue = null ; if ( _nextToken != null ) { JsonToken t = _nextToken ; _currToken = t ; _nextToken = null ; if ( t == JsonToken . VALUE_STRING ) { return _currText ; } _updateState ( t ) ; return null ; } int token = _xmlTokens . next ( ) ; while ( token == XmlTokenStream . XML_START_ELEMENT ) { if ( _mayBeLeaf ) { _nextToken = JsonToken . FIELD_NAME ; _parsingContext = _parsingContext . createChildObjectContext ( - 1 , - 1 ) ; _currToken = JsonToken . START_OBJECT ; return null ; } if ( _parsingContext . inArray ( ) ) { token = _xmlTokens . next ( ) ; _mayBeLeaf = true ; continue ; } String name = _xmlTokens . getLocalName ( ) ; _parsingContext . setCurrentName ( name ) ; if ( _namesToWrap != null && _namesToWrap . contains ( name ) ) { _xmlTokens . repeatStartElement ( ) ; } _mayBeLeaf = true ; _currToken = JsonToken . FIELD_NAME ; return null ; } switch ( token ) { case XmlTokenStream . XML_END_ELEMENT : if ( _mayBeLeaf ) { _mayBeLeaf = false ; _currToken = JsonToken . VALUE_STRING ; return ( _currText = "" ) ; } _currToken = _parsingContext . inArray ( ) ? JsonToken . END_ARRAY : JsonToken . END_OBJECT ; _parsingContext = _parsingContext . getParent ( ) ; _namesToWrap = _parsingContext . getNamesToWrap ( ) ; break ; case XmlTokenStream . XML_ATTRIBUTE_NAME : if ( _mayBeLeaf ) { _mayBeLeaf = false ; _nextToken = JsonToken . FIELD_NAME ; _currText = _xmlTokens . getText ( ) ; _parsingContext = _parsingContext . createChildObjectContext ( - 1 , - 1 ) ; _currToken = JsonToken . START_OBJECT ; } else { _parsingContext . setCurrentName ( _xmlTokens . getLocalName ( ) ) ; _currToken = JsonToken . FIELD_NAME ; } break ; case XmlTokenStream . XML_ATTRIBUTE_VALUE : _currToken = JsonToken . VALUE_STRING ; return ( _currText = _xmlTokens . getText ( ) ) ; case XmlTokenStream . XML_TEXT : _currText = _xmlTokens . getText ( ) ; if ( _mayBeLeaf ) { _mayBeLeaf = false ; _xmlTokens . skipEndElement ( ) ; _currToken = JsonToken . VALUE_STRING ; return _currText ; } _parsingContext . setCurrentName ( _cfgNameForTextElement ) ; _nextToken = JsonToken . VALUE_STRING ; _currToken = JsonToken . FIELD_NAME ; break ; case XmlTokenStream . XML_END : _currToken = null ; } return null ; }
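A hedged reproduction sketch (assuming XmlFactory.createParser(String) hands back a FromXmlParser and the default attribute-as-property token sequence):

```java
import com.fasterxml.jackson.dataformat.xml.XmlFactory;
import com.fasterxml.jackson.dataformat.xml.deser.FromXmlParser;

public class NextTextValueRepro {
    public static void main(String[] args) throws Exception {
        XmlFactory factory = new XmlFactory();
        FromXmlParser parser = (FromXmlParser) factory.createParser("<root attr=\"value\"/>");
        parser.nextToken(); // START_OBJECT
        parser.nextToken(); // FIELD_NAME "attr"
        // Before the fix this returned null even though the current token ended up
        // being VALUE_STRING; after the fix it returns "value".
        System.out.println(parser.nextTextValue());
    }
}
```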
Csv
5
src/main/java/org/apache/commons/csv/CSVPrinter.java
323
327
CSVFormat.format always appends null
When I now call CSVFormat.newFormat(';').withSkipHeaderRecord(true).withHeader("H1","H2").format("A","B") I get the output A;Bnull. The expected output would be A;B.
public void println() throws IOException { final String recordSeparator = format.getRecordSeparator(); out.append(recordSeparator); newRecord = true; }
public void println ( ) throws IOException { final String recordSeparator = format . getRecordSeparator ( ) ; out . append ( recordSeparator ) ; newRecord = true ; }
public void println() throws IOException { final String recordSeparator = format.getRecordSeparator(); if (recordSeparator != null) { out.append(recordSeparator); } newRecord = true; }
public void println ( ) throws IOException { final String recordSeparator = format . getRecordSeparator ( ) ; if ( recordSeparator != null ) { out . append ( recordSeparator ) ; } newRecord = true ; }
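A minimal reproduction sketch for this record, assuming commons-csv on the classpath; it mirrors the reporter's call chain:
```java
import org.apache.commons.csv.CSVFormat;

public class FormatNullSeparatorDemo {
    public static void main(String[] args) {
        // newFormat(';') defines no record separator, so println() must
        // not blindly append it to the output
        CSVFormat format = CSVFormat.newFormat(';')
                .withSkipHeaderRecord(true)
                .withHeader("H1", "H2");
        System.out.println(format.format("A", "B")); // "A;B" after the fix, "A;Bnull" before
    }
}
```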
Math
84
src/main/java/org/apache/commons/math/optimization/direct/MultiDirectional.java
60
99
MultiDirectional optimization loops forever if started at the correct solution
MultiDirectional.iterateSimplex loops forever if the starting point is the correct solution. See the attached test case (testMultiDirectionalCorrectStart) as an example.
@Override protected void iterateSimplex(final Comparator<RealPointValuePair> comparator) throws FunctionEvaluationException, OptimizationException, IllegalArgumentException { while (true) { incrementIterationsCounter(); // save the original vertex final RealPointValuePair[] original = simplex; final RealPointValuePair best = original[0]; // perform a reflection step final RealPointValuePair reflected = evaluateNewSimplex(original, 1.0, comparator); if (comparator.compare(reflected, best) < 0) { // compute the expanded simplex final RealPointValuePair[] reflectedSimplex = simplex; final RealPointValuePair expanded = evaluateNewSimplex(original, khi, comparator); if (comparator.compare(reflected, expanded) <= 0) { // accept the reflected simplex simplex = reflectedSimplex; } return; } // compute the contracted simplex final RealPointValuePair contracted = evaluateNewSimplex(original, gamma, comparator); if (comparator.compare(contracted, best) < 0) { // accept the contracted simplex // check convergence return; } } }
@ Override protected void iterateSimplex ( final Comparator < RealPointValuePair > comparator ) throws FunctionEvaluationException , OptimizationException , IllegalArgumentException { while ( true ) { incrementIterationsCounter ( ) ; final RealPointValuePair [ ] original = simplex ; final RealPointValuePair best = original [ 0 ] ; final RealPointValuePair reflected = evaluateNewSimplex ( original , 1.0 , comparator ) ; if ( comparator . compare ( reflected , best ) < 0 ) { final RealPointValuePair [ ] reflectedSimplex = simplex ; final RealPointValuePair expanded = evaluateNewSimplex ( original , khi , comparator ) ; if ( comparator . compare ( reflected , expanded ) <= 0 ) { simplex = reflectedSimplex ; } return ; } final RealPointValuePair contracted = evaluateNewSimplex ( original , gamma , comparator ) ; if ( comparator . compare ( contracted , best ) < 0 ) { return ; } } }
@Override protected void iterateSimplex(final Comparator<RealPointValuePair> comparator) throws FunctionEvaluationException, OptimizationException, IllegalArgumentException { final RealConvergenceChecker checker = getConvergenceChecker(); while (true) { incrementIterationsCounter(); // save the original vertex final RealPointValuePair[] original = simplex; final RealPointValuePair best = original[0]; // perform a reflection step final RealPointValuePair reflected = evaluateNewSimplex(original, 1.0, comparator); if (comparator.compare(reflected, best) < 0) { // compute the expanded simplex final RealPointValuePair[] reflectedSimplex = simplex; final RealPointValuePair expanded = evaluateNewSimplex(original, khi, comparator); if (comparator.compare(reflected, expanded) <= 0) { // accept the reflected simplex simplex = reflectedSimplex; } return; } // compute the contracted simplex final RealPointValuePair contracted = evaluateNewSimplex(original, gamma, comparator); if (comparator.compare(contracted, best) < 0) { // accept the contracted simplex return; } // check convergence final int iter = getIterations(); boolean converged = true; for (int i = 0; i < simplex.length; ++i) { converged &= checker.converged(iter, original[i], simplex[i]); } if (converged) { return; } } }
@ Override protected void iterateSimplex ( final Comparator < RealPointValuePair > comparator ) throws FunctionEvaluationException , OptimizationException , IllegalArgumentException { final RealConvergenceChecker checker = getConvergenceChecker ( ) ; while ( true ) { incrementIterationsCounter ( ) ; final RealPointValuePair [ ] original = simplex ; final RealPointValuePair best = original [ 0 ] ; final RealPointValuePair reflected = evaluateNewSimplex ( original , 1.0 , comparator ) ; if ( comparator . compare ( reflected , best ) < 0 ) { final RealPointValuePair [ ] reflectedSimplex = simplex ; final RealPointValuePair expanded = evaluateNewSimplex ( original , khi , comparator ) ; if ( comparator . compare ( reflected , expanded ) <= 0 ) { simplex = reflectedSimplex ; } return ; } final RealPointValuePair contracted = evaluateNewSimplex ( original , gamma , comparator ) ; if ( comparator . compare ( contracted , best ) < 0 ) { return ; } final int iter = getIterations ( ) ; boolean converged = true ; for ( int i = 0 ; i < simplex . length ; ++ i ) { converged &= checker . converged ( iter , original [ i ] , simplex [ i ] ) ; } if ( converged ) { return ; } } }
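A sketch of the reported failure mode, assuming commons-math 2.x on the classpath (the quadratic objective is illustrative): starting the simplex exactly at the optimum used to spin until the iteration safeguard tripped, whereas the fixed version lets the convergence checker terminate the loop.
```java
import org.apache.commons.math.analysis.MultivariateRealFunction;
import org.apache.commons.math.optimization.GoalType;
import org.apache.commons.math.optimization.RealPointValuePair;
import org.apache.commons.math.optimization.direct.MultiDirectional;

public class CorrectStartDemo {
    public static void main(String[] args) throws Exception {
        // minimum of x^2 + y^2 is at (0, 0), which is also the start point
        MultivariateRealFunction f =
                point -> point[0] * point[0] + point[1] * point[1];
        MultiDirectional optimizer = new MultiDirectional();
        optimizer.setMaxIterations(200); // safety net; the fix converges well before this
        RealPointValuePair result =
                optimizer.optimize(f, GoalType.MINIMIZE, new double[] { 0, 0 });
        System.out.println(result.getValue()); // 0.0, and the call actually returns
    }
}
```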
Chart
12
source/org/jfree/chart/plot/MultiplePiePlot.java
143
158
Fix for MultiplePiePlot
When dataset is passed into constructor for MultiplePiePlot, the dataset is not wired to a listener, as it would be if setDataset is called.
public MultiplePiePlot(CategoryDataset dataset) { super(); this.dataset = dataset; PiePlot piePlot = new PiePlot(null); this.pieChart = new JFreeChart(piePlot); this.pieChart.removeLegend(); this.dataExtractOrder = TableOrder.BY_COLUMN; this.pieChart.setBackgroundPaint(null); TextTitle seriesTitle = new TextTitle("Series Title", new Font("SansSerif", Font.BOLD, 12)); seriesTitle.setPosition(RectangleEdge.BOTTOM); this.pieChart.setTitle(seriesTitle); this.aggregatedItemsKey = "Other"; this.aggregatedItemsPaint = Color.lightGray; this.sectionPaints = new HashMap(); }
public MultiplePiePlot ( CategoryDataset dataset ) { super ( ) ; this . dataset = dataset ; PiePlot piePlot = new PiePlot ( null ) ; this . pieChart = new JFreeChart ( piePlot ) ; this . pieChart . removeLegend ( ) ; this . dataExtractOrder = TableOrder . BY_COLUMN ; this . pieChart . setBackgroundPaint ( null ) ; TextTitle seriesTitle = new TextTitle ( "Series Title" , new Font ( "SansSerif" , Font . BOLD , 12 ) ) ; seriesTitle . setPosition ( RectangleEdge . BOTTOM ) ; this . pieChart . setTitle ( seriesTitle ) ; this . aggregatedItemsKey = "Other" ; this . aggregatedItemsPaint = Color . lightGray ; this . sectionPaints = new HashMap ( ) ; }
public MultiplePiePlot(CategoryDataset dataset) { super(); setDataset(dataset); PiePlot piePlot = new PiePlot(null); this.pieChart = new JFreeChart(piePlot); this.pieChart.removeLegend(); this.dataExtractOrder = TableOrder.BY_COLUMN; this.pieChart.setBackgroundPaint(null); TextTitle seriesTitle = new TextTitle("Series Title", new Font("SansSerif", Font.BOLD, 12)); seriesTitle.setPosition(RectangleEdge.BOTTOM); this.pieChart.setTitle(seriesTitle); this.aggregatedItemsKey = "Other"; this.aggregatedItemsPaint = Color.lightGray; this.sectionPaints = new HashMap(); }
public MultiplePiePlot ( CategoryDataset dataset ) { super ( ) ; setDataset ( dataset ) ; PiePlot piePlot = new PiePlot ( null ) ; this . pieChart = new JFreeChart ( piePlot ) ; this . pieChart . removeLegend ( ) ; this . dataExtractOrder = TableOrder . BY_COLUMN ; this . pieChart . setBackgroundPaint ( null ) ; TextTitle seriesTitle = new TextTitle ( "Series Title" , new Font ( "SansSerif" , Font . BOLD , 12 ) ) ; seriesTitle . setPosition ( RectangleEdge . BOTTOM ) ; this . pieChart . setTitle ( seriesTitle ) ; this . aggregatedItemsKey = "Other" ; this . aggregatedItemsPaint = Color . lightGray ; this . sectionPaints = new HashMap ( ) ; }
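A short sketch of the fixed wiring, assuming JFreeChart on the classpath; AbstractDataset.hasListener() makes the registration visible:
```java
import org.jfree.chart.plot.MultiplePiePlot;
import org.jfree.data.category.DefaultCategoryDataset;

public class ConstructorListenerDemo {
    public static void main(String[] args) {
        DefaultCategoryDataset dataset = new DefaultCategoryDataset();
        MultiplePiePlot plot = new MultiplePiePlot(dataset);
        // false before the fix: the constructor stored the dataset without
        // registering the plot as a change listener
        System.out.println(dataset.hasListener(plot));
    }
}
```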
Mockito
28
src/org/mockito/internal/configuration/DefaultInjectionEngine.java
91
95
@InjectMocks injects mocks into Object-typed fields
When using @InjectMocks on a class which has fields of the type "Object", the mock gets injected into that field instead of only being injected into the field it is supposed to go to. See attachments for a test case on this problem. It would be better if @InjectMocks left the Object fields alone, because this behaviour is not expected and it is causing my own unit tests to fail. One example of such a failure is the production code having an Object constant (see the commented-out one in TestService as illustration) which Mockito tries to override with a mock, but fails because PowerMock is not available. And even if I set the Object field to non-static, it mangles the logic of my production code, because on class initialization the object is used to register a listener in a different class with that object as a map accessor. Now when Mockito overrides the Object field, the requests are added with the mock object instead of the original Object instance, and thus no listeners are found by the async service my service uses. I don't like the idea of removing the Object field as an accessor and instead using "this" (not explicit enough for readability purposes) or some other type as the accessor type (just more code); I think this should be fixed instead. Surely there would be more occasions where this would cause me problems when writing unit tests. Best regards, subes
private void injectMockCandidate(Class<?> awaitingInjectionClazz, Set<Object> mocks, Object fieldInstance) { for(Field field : orderedInstanceFieldsFrom(awaitingInjectionClazz)) { mockCandidateFilter.filterCandidate(mocks, field, fieldInstance).thenInject(); } }
private void injectMockCandidate ( Class < ? > awaitingInjectionClazz , Set < Object > mocks , Object fieldInstance ) { for ( Field field : orderedInstanceFieldsFrom ( awaitingInjectionClazz ) ) { mockCandidateFilter . filterCandidate ( mocks , field , fieldInstance ) . thenInject ( ) ; } }
private void injectMockCandidate(Class<?> awaitingInjectionClazz, Set<Object> mocks, Object fieldInstance) { for(Field field : orderedInstanceFieldsFrom(awaitingInjectionClazz)) { Object injected = mockCandidateFilter.filterCandidate(mocks, field, fieldInstance).thenInject(); mocks.remove(injected); } }
private void injectMockCandidate ( Class < ? > awaitingInjectionClazz , Set < Object > mocks , Object fieldInstance ) { for ( Field field : orderedInstanceFieldsFrom ( awaitingInjectionClazz ) ) { Object injected = mockCandidateFilter . filterCandidate ( mocks , field , fieldInstance ) . thenInject ( ) ; mocks . remove ( injected ) ; } }
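A reproduction sketch of the reporter's scenario under JUnit 4 + Mockito (the class and field names are invented for illustration, not taken from the report's attachment):
```java
import org.junit.Test;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;

public class ObjectFieldInjectionTest {
    static class TestService {
        Object accessor = new Object(); // used elsewhere as a map key; must stay intact
        Runnable dependency;            // the intended injection target
    }

    @Mock Runnable dependency;
    @InjectMocks TestService service;

    @Test
    public void objectFieldKeepsItsOriginalValue() {
        MockitoAnnotations.initMocks(this);
        // With the fix, an injected mock is removed from the candidate set,
        // so it cannot be injected a second time into the Object-typed field.
        System.out.println(service.dependency); // the Runnable mock
        System.out.println(service.accessor);   // still the original Object
    }
}
```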
Math
60
src/main/java/org/apache/commons/math/distribution/NormalDistributionImpl.java
124
138
ConvergenceException in NormalDistributionImpl.cumulativeProbability()
I get a ConvergenceException in NormalDistributionImpl.cumulativeProbability() for very large/small parameters, including Infinity and -Infinity. For instance, in the following code: @Test public void testCumulative() { final NormalDistribution nd = new NormalDistributionImpl(); for (int i = 0; i < 500; i++) { final double val = Math.exp(i); try { System.out.println("val = " + val + " cumulative = " + nd.cumulativeProbability(val)); } catch (MathException e) { e.printStackTrace(); fail(); } } } In version 2.0, I get no exception. My suggestion is to change the implementation of cumulativeProbability(double) to catch every ConvergenceException (and return the limit value for very large and very small arguments), not just MaxIterationsExceededException.
public double cumulativeProbability(double x) throws MathException { final double dev = x - mean; try { return 0.5 * (1.0 + Erf.erf((dev) / (standardDeviation * FastMath.sqrt(2.0)))); } catch (MaxIterationsExceededException ex) { if (x < (mean - 20 * standardDeviation)) { // JDK 1.5 blows at 38 return 0; } else if (x > (mean + 20 * standardDeviation)) { return 1; } else { throw ex; } } }
public double cumulativeProbability ( double x ) throws MathException { final double dev = x - mean ; try { return 0.5 * ( 1.0 + Erf . erf ( ( dev ) / ( standardDeviation * FastMath . sqrt ( 2.0 ) ) ) ) ; } catch ( MaxIterationsExceededException ex ) { if ( x < ( mean - 20 * standardDeviation ) ) { return 0 ; } else if ( x > ( mean + 20 * standardDeviation ) ) { return 1 ; } else { throw ex ; } } }
public double cumulativeProbability(double x) throws MathException { final double dev = x - mean; if (FastMath.abs(dev) > 40 * standardDeviation) { return dev < 0 ? 0.0d : 1.0d; } return 0.5 * (1.0 + Erf.erf((dev) / (standardDeviation * FastMath.sqrt(2.0)))); }
public double cumulativeProbability ( double x ) throws MathException { final double dev = x - mean ; if ( FastMath . abs ( dev ) > 40 * standardDeviation ) { return dev < 0 ? 0.0d : 1.0d ; } return 0.5 * ( 1.0 + Erf . erf ( ( dev ) / ( standardDeviation * FastMath . sqrt ( 2.0 ) ) ) ) ; }
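Beyond the reporter's loop, a sketch of the fixed short-circuit for extreme arguments, assuming commons-math 2.x on the classpath:
```java
import org.apache.commons.math.distribution.NormalDistribution;
import org.apache.commons.math.distribution.NormalDistributionImpl;

public class ExtremeTailDemo {
    public static void main(String[] args) throws Exception {
        NormalDistribution nd = new NormalDistributionImpl();
        // deviations beyond 40 standard deviations return the limit directly,
        // so Erf.erf is never asked to converge on a hopeless argument
        System.out.println(nd.cumulativeProbability(Double.POSITIVE_INFINITY)); // 1.0
        System.out.println(nd.cumulativeProbability(Double.NEGATIVE_INFINITY)); // 0.0
        System.out.println(nd.cumulativeProbability(Math.exp(300)));            // 1.0
    }
}
```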
Mockito
7
src/org/mockito/internal/util/reflection/GenericMetadataSupport.java
375
380
Deep stubbing with generic responses in the call chain is not working
Deep stubbing will throw an Exception if multiple generics occur in the call chain. For instance, consider having a mock `myMock1` that provides a function that returns a generic `T`. If `T` also has a function that returns a generic, an Exception with the message "Raw extraction not supported for : 'null'" will be thrown. As an example the following test will throw an Exception: ``` Java public class MockitoGenericsDeepStubTest { @Test public void discoverDeepMockingOfGenerics() { MyClass1 myMock1 = mock(MyClass1.class, RETURNS_DEEP_STUBS); when(myMock1.getNested().getNested().returnSomething()).thenReturn("Hello World."); } public static interface MyClass1 <MC2 extends MyClass2> { public MC2 getNested(); } public static interface MyClass2<MC3 extends MyClass3> { public MC3 getNested(); } public static interface MyClass3 { public String returnSomething(); } } ``` You can make this test run if you step into the class `ReturnsDeepStubs` and change the method `withSettingsUsing` to return `MockSettings` with `ReturnsDeepStubs` instead of `ReturnsDeepStubsSerializationFallback` as default answer: ``` Java private MockSettings withSettingsUsing(GenericMetadataSupport returnTypeGenericMetadata, MockCreationSettings parentMockSettings) { MockSettings mockSettings = returnTypeGenericMetadata.hasRawExtraInterfaces() ? withSettings().extraInterfaces(returnTypeGenericMetadata.rawExtraInterfaces()) : withSettings(); return propagateSerializationSettings(mockSettings, parentMockSettings) .defaultAnswer(this); } ``` However, this breaks other tests and features. I think, the issue is that further generics are not possible to be mocked by `ReturnsDeepStubsSerializationFallback` since the `GenericMetadataSupport` is "closed" at this point. Thanks and kind regards Tobias
private void readTypeVariables() { for (Type type : typeVariable.getBounds()) { registerTypeVariablesOn(type); } registerTypeVariablesOn(getActualTypeArgumentFor(typeVariable)); }
private void readTypeVariables ( ) { for ( Type type : typeVariable . getBounds ( ) ) { registerTypeVariablesOn ( type ) ; } registerTypeVariablesOn ( getActualTypeArgumentFor ( typeVariable ) ) ; }
private void readTypeVariables() { for (Type type : typeVariable.getBounds()) { registerTypeVariablesOn(type); } registerTypeParametersOn(new TypeVariable[] { typeVariable }); registerTypeVariablesOn(getActualTypeArgumentFor(typeVariable)); }
private void readTypeVariables ( ) { for ( Type type : typeVariable . getBounds ( ) ) { registerTypeVariablesOn ( type ) ; } registerTypeParametersOn ( new TypeVariable [ ] { typeVariable } ) ; registerTypeVariablesOn ( getActualTypeArgumentFor ( typeVariable ) ) ; }
Compress
38
src/main/java/org/apache/commons/compress/archivers/tar/TarArchiveEntry.java
849
864
PAX header entry name ending with / causes problems
There seems to be a problem when a PAX header entry (link flag is 'x') has a name ending with "/". The {{TarArchiveEntry.isDirectory()}} check ends up returning {{true}} because of the trailing slash which means no content can be read from the entry. PAX header parsing effectively finds nothing and the stream is not advanced; this leaves the stream in a bad state as the next entry's header is actually read from the header contents. If the name is modified to remove the trailing slash when the link flag indicates a PAX header everything seems to work fine. That would be one potential fix in {{parseTarHeader}}. Changing {{isDirectory}} to return {{false}} if {{isPaxHeader}} is {{true}} (before the trailing "/" check) would probably also fix the issue (though I can't verify that in the debugger like I can with changing the name). So far I have only seen this when using Docker to save images that contain a yum database. For example: {noformat} docker pull centos:latest && docker save centos:latest | tar x --include "*/layer.tar" {noformat} Will produce at least one "layer.tar" that exhibits this issue. If I come across a smaller TAR for testing I will attach it.
@Override public boolean isDirectory() { if (file != null) { return file.isDirectory(); } if (linkFlag == LF_DIR) { return true; } if (getName().endsWith("/")) { return true; } return false; }
@ Override public boolean isDirectory ( ) { if ( file != null ) { return file . isDirectory ( ) ; } if ( linkFlag == LF_DIR ) { return true ; } if ( getName ( ) . endsWith ( "/" ) ) { return true ; } return false ; }
@Override public boolean isDirectory() { if (file != null) { return file.isDirectory(); } if (linkFlag == LF_DIR) { return true; } if (!isPaxHeader() && !isGlobalPaxHeader() && getName().endsWith("/")) { return true; } return false; }
@ Override public boolean isDirectory ( ) { if ( file != null ) { return file . isDirectory ( ) ; } if ( linkFlag == LF_DIR ) { return true ; } if ( ! isPaxHeader ( ) && ! isGlobalPaxHeader ( ) && getName ( ) . endsWith ( "/" ) ) { return true ; } return false ; }
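A minimal sketch of the fixed check, assuming commons-compress on the classpath; the entry name is illustrative:
```java
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarConstants;

public class PaxHeaderDirectoryDemo {
    public static void main(String[] args) {
        // link flag 'x' marks a PAX extended header entry
        TarArchiveEntry pax = new TarArchiveEntry("some/dir/",
                TarConstants.LF_PAX_EXTENDED_HEADER_LC);
        // false after the fix: a PAX header is never a directory, so its
        // content (the extended headers) can actually be read
        System.out.println(pax.isDirectory());
    }
}
```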
Cli
20
src/java/org/apache/commons/cli/PosixParser.java
97
159
PosixParser keeps processing tokens after an unrecognized long option
PosixParser keeps processing tokens after an unrecognized long option when stopAtNonOption is enabled. The tokens after the unrecognized long option are burst, split around '=', etc., instead of being kept as-is. For example, with the options 'a' and 'b' defined, 'b' having an argument, the following command line: {code}--zop -abfoo{code} is interpreted as: {code}--zop -a -b foo{code} but the last token should remain unchanged.
protected String[] flatten(Options options, String[] arguments, boolean stopAtNonOption) { init(); this.options = options; // an iterator for the command line tokens Iterator iter = Arrays.asList(arguments).iterator(); // process each command line token while (iter.hasNext()) { // get the next command line token String token = (String) iter.next(); // handle long option --foo or --foo=bar if (token.startsWith("--")) { if (token.indexOf('=') != -1) { tokens.add(token.substring(0, token.indexOf('='))); tokens.add(token.substring(token.indexOf('=') + 1, token.length())); } else { tokens.add(token); } } // single hyphen else if ("-".equals(token)) { tokens.add(token); } else if (token.startsWith("-")) { if (token.length() == 2) { processOptionToken(token, stopAtNonOption); } else if (options.hasOption(token)) { tokens.add(token); } // requires bursting else { burstToken(token, stopAtNonOption); } } else if (stopAtNonOption) { process(token); } else { tokens.add(token); } gobble(iter); } return (String[]) tokens.toArray(new String[tokens.size()]); }
protected String [ ] flatten ( Options options , String [ ] arguments , boolean stopAtNonOption ) { init ( ) ; this . options = options ; Iterator iter = Arrays . asList ( arguments ) . iterator ( ) ; while ( iter . hasNext ( ) ) { String token = ( String ) iter . next ( ) ; if ( token . startsWith ( "--" ) ) { if ( token . indexOf ( '=' ) != - 1 ) { tokens . add ( token . substring ( 0 , token . indexOf ( '=' ) ) ) ; tokens . add ( token . substring ( token . indexOf ( '=' ) + 1 , token . length ( ) ) ) ; } else { tokens . add ( token ) ; } } else if ( "-" . equals ( token ) ) { tokens . add ( token ) ; } else if ( token . startsWith ( "-" ) ) { if ( token . length ( ) == 2 ) { processOptionToken ( token , stopAtNonOption ) ; } else if ( options . hasOption ( token ) ) { tokens . add ( token ) ; } else { burstToken ( token , stopAtNonOption ) ; } } else if ( stopAtNonOption ) { process ( token ) ; } else { tokens . add ( token ) ; } gobble ( iter ) ; } return ( String [ ] ) tokens . toArray ( new String [ tokens . size ( ) ] ) ; }
protected String[] flatten(Options options, String[] arguments, boolean stopAtNonOption) { init(); this.options = options; // an iterator for the command line tokens Iterator iter = Arrays.asList(arguments).iterator(); // process each command line token while (iter.hasNext()) { // get the next command line token String token = (String) iter.next(); // handle long option --foo or --foo=bar if (token.startsWith("--")) { int pos = token.indexOf('='); String opt = pos == -1 ? token : token.substring(0, pos); // --foo if (!options.hasOption(opt) && stopAtNonOption) { process(token); } else { tokens.add(opt); if (pos != -1) { tokens.add(token.substring(pos + 1)); } } } // single hyphen else if ("-".equals(token)) { tokens.add(token); } else if (token.startsWith("-")) { if (token.length() == 2) { processOptionToken(token, stopAtNonOption); } else if (options.hasOption(token)) { tokens.add(token); } // requires bursting else { burstToken(token, stopAtNonOption); } } else if (stopAtNonOption) { process(token); } else { tokens.add(token); } gobble(iter); } return (String[]) tokens.toArray(new String[tokens.size()]); }
protected String [ ] flatten ( Options options , String [ ] arguments , boolean stopAtNonOption ) { init ( ) ; this . options = options ; Iterator iter = Arrays . asList ( arguments ) . iterator ( ) ; while ( iter . hasNext ( ) ) { String token = ( String ) iter . next ( ) ; if ( token . startsWith ( "--" ) ) { int pos = token . indexOf ( '=' ) ; String opt = pos == - 1 ? token : token . substring ( 0 , pos ) ; if ( ! options . hasOption ( opt ) && stopAtNonOption ) { process ( token ) ; } else { tokens . add ( opt ) ; if ( pos != - 1 ) { tokens . add ( token . substring ( pos + 1 ) ) ; } } } else if ( "-" . equals ( token ) ) { tokens . add ( token ) ; } else if ( token . startsWith ( "-" ) ) { if ( token . length ( ) == 2 ) { processOptionToken ( token , stopAtNonOption ) ; } else if ( options . hasOption ( token ) ) { tokens . add ( token ) ; } else { burstToken ( token , stopAtNonOption ) ; } } else if ( stopAtNonOption ) { process ( token ) ; } else { tokens . add ( token ) ; } gobble ( iter ) ; } return ( String [ ] ) tokens . toArray ( new String [ tokens . size ( ) ] ) ; }
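A sketch mirroring the reporter's command line, assuming commons-cli on the classpath:
```java
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;

public class StopAtNonOptionDemo {
    public static void main(String[] args) throws Exception {
        Options options = new Options();
        options.addOption("a", false, "simple flag");
        options.addOption("b", true, "takes an argument");
        CommandLine cl = new PosixParser().parse(options,
                new String[] { "--zop", "-abfoo" }, true); // stopAtNonOption = true
        // After the fix both tokens survive untouched: [--zop, -abfoo];
        // before it, "-abfoo" was burst into -a -b foo.
        System.out.println(cl.getArgList());
    }
}
```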
Compress
1
src/main/java/org/apache/commons/compress/archivers/cpio/CpioArchiveOutputStream.java
344
349
CPIO reports unexpected EOF
When unpacking a CPIO archive (made with the compress classes or even with the OS X cpio command-line tool), an EOF exception is thrown. Here is the test code: final File input = getFile("cmdcreated.cpio"); final InputStream in = new FileInputStream(input); CpioArchiveInputStream cin = new CpioArchiveInputStream(in); CpioArchiveEntry entry = null; while ((entry = (CpioArchiveEntry) cin.getNextCPIOEntry()) != null) { File target = new File(dir, entry.getName()); final OutputStream out = new FileOutputStream(target); IOUtils.copy(in, out); out.close(); } cin.close(); Stacktrace is here: java.io.EOFException at org.apache.commons.compress.archivers.cpio.CpioArchiveInputStream.readFully(CpioArchiveInputStream.java:293) at org.apache.commons.compress.archivers.cpio.CpioArchiveInputStream.getNextCPIOEntry(CpioArchiveInputStream.java:168) at org.apache.commons.compress.archivers.cpio.CpioArchiveInputStreamTest.testCpioUnpack(CpioArchiveInputStreamTest.java:26) ... This happens with the first read access to the archive. It occurred during my attempt to improve the test cases.
public void close() throws IOException { if (!this.closed) { super.close(); this.closed = true; } }
public void close ( ) throws IOException { if ( ! this . closed ) { super . close ( ) ; this . closed = true ; } }
public void close() throws IOException { if (!this.closed) { this.finish(); super.close(); this.closed = true; } }
public void close ( ) throws IOException { if ( ! this . closed ) { this . finish ( ) ; super . close ( ) ; this . closed = true ; } }
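A round-trip sketch of the fixed close(), assuming commons-compress on the classpath:
```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.commons.compress.archivers.cpio.CpioArchiveEntry;
import org.apache.commons.compress.archivers.cpio.CpioArchiveInputStream;
import org.apache.commons.compress.archivers.cpio.CpioArchiveOutputStream;

public class CpioCloseDemo {
    public static void main(String[] args) throws Exception {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        CpioArchiveOutputStream out = new CpioArchiveOutputStream(bos);
        byte[] content = "hello".getBytes("US-ASCII");
        CpioArchiveEntry entry = new CpioArchiveEntry("hello.txt");
        entry.setSize(content.length);
        out.putArchiveEntry(entry);
        out.write(content);
        out.closeArchiveEntry();
        out.close(); // now calls finish(), writing the TRAILER!!! record

        // Reading back no longer hits the premature EOF
        CpioArchiveInputStream in =
                new CpioArchiveInputStream(new ByteArrayInputStream(bos.toByteArray()));
        System.out.println(in.getNextCPIOEntry().getName()); // hello.txt
        in.close();
    }
}
```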
JacksonDatabind
100
src/main/java/com/fasterxml/jackson/databind/node/TreeTraversingParser.java
354
376
`TreeTraversingParser` does not take base64 variant into account
This affects at least 2.6.4 to current versions. In [TreeTraversingParser#getBinaryValue](https://github.com/FasterXML/jackson-databind/blob/master/src/main/java/com/fasterxml/jackson/databind/node/TreeTraversingParser.java#L348), a `Base64Variant` is accepted but ignored. The call to `n.binaryValue()`, when `n` is a `TextNode`, then uses the default Base64 variant instead of what's specified. It seems the correct behavior would be to call `TextNode#getBinaryValue` instead.
@Override public byte[] getBinaryValue(Base64Variant b64variant) throws IOException, JsonParseException { // Multiple possibilities... JsonNode n = currentNode(); if (n != null) { // [databind#2096]: although `binaryValue()` works for real binary node // and embedded "POJO" node, coercion from TextNode may require variant, so: byte[] data = n.binaryValue(); if (data != null) { return data; } if (n.isPojo()) { Object ob = ((POJONode) n).getPojo(); if (ob instanceof byte[]) { return (byte[]) ob; } } } // otherwise return null to mark we have no binary content return null; }
@ Override public byte [ ] getBinaryValue ( Base64Variant b64variant ) throws IOException , JsonParseException { JsonNode n = currentNode ( ) ; if ( n != null ) { byte [ ] data = n . binaryValue ( ) ; if ( data != null ) { return data ; } if ( n . isPojo ( ) ) { Object ob = ( ( POJONode ) n ) . getPojo ( ) ; if ( ob instanceof byte [ ] ) { return ( byte [ ] ) ob ; } } } return null ; }
@Override public byte[] getBinaryValue(Base64Variant b64variant) throws IOException, JsonParseException { // Multiple possibilities... JsonNode n = currentNode(); if (n != null) { // [databind#2096]: although `binaryValue()` works for real binary node // and embedded "POJO" node, coercion from TextNode may require variant, so: if (n instanceof TextNode) { return ((TextNode) n).getBinaryValue(b64variant); } return n.binaryValue(); } // otherwise return null to mark we have no binary content return null; }
@ Override public byte [ ] getBinaryValue ( Base64Variant b64variant ) throws IOException , JsonParseException { JsonNode n = currentNode ( ) ; if ( n != null ) { if ( n instanceof TextNode ) { return ( ( TextNode ) n ) . getBinaryValue ( b64variant ) ; } return n . binaryValue ( ) ; } return null ; }
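A sketch of the variant-sensitive round trip, assuming jackson-databind on the classpath; the payload bytes are chosen so the encoding contains a character that differs between the MIME and URL alphabets:
```java
import com.fasterxml.jackson.core.Base64Variants;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.TextNode;

public class TreeBase64VariantDemo {
    public static void main(String[] args) throws Exception {
        byte[] payload = { (byte) 0xFB, (byte) 0xF0 }; // encodes to '-' under MODIFIED_FOR_URL
        String encoded = Base64Variants.MODIFIED_FOR_URL.encode(payload);
        JsonParser p = new ObjectMapper().treeAsTokens(TextNode.valueOf(encoded));
        p.nextToken();
        // Before the fix the variant argument was ignored and decoding used
        // the default (MIME) alphabet, which rejects '-' and '_'.
        byte[] decoded = p.getBinaryValue(Base64Variants.MODIFIED_FOR_URL);
        System.out.println(decoded.length); // 2
        p.close();
    }
}
```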
Math
21
src/main/java/org/apache/commons/math3/linear/RectangularCholeskyDecomposition.java
62
151
Correlated random vector generator fails (silently) when faced with zero rows in covariance matrix
The following three matrices (which are basically permutations of each other) produce different results when sampling a multi-variate Gaussian with the help of CorrelatedRandomVectorGenerator (sample covariances calculated in R, based on 10,000 samples): Array2DRowRealMatrix{ {0.0,0.0,0.0,0.0,0.0}, {0.0,0.013445532,0.01039469,0.009881156,0.010499559}, {0.0,0.01039469,0.023006616,0.008196856,0.010732709}, {0.0,0.009881156,0.008196856,0.019023866,0.009210099}, {0.0,0.010499559,0.010732709,0.009210099,0.019107243}} > cov(data1) V1 V2 V3 V4 V5 V1 0 0.000000000 0.00000000 0.000000000 0.000000000 V2 0 0.013383931 0.01034401 0.009913271 0.010506733 V3 0 0.010344006 0.02309479 0.008374730 0.010759306 V4 0 0.009913271 0.00837473 0.019005488 0.009187287 V5 0 0.010506733 0.01075931 0.009187287 0.019021483 Array2DRowRealMatrix{ {0.013445532,0.01039469,0.0,0.009881156,0.010499559}, {0.01039469,0.023006616,0.0,0.008196856,0.010732709}, {0.0,0.0,0.0,0.0,0.0}, {0.009881156,0.008196856,0.0,0.019023866,0.009210099}, {0.010499559,0.010732709,0.0,0.009210099,0.019107243}} > cov(data2) V1 V2 V3 V4 V5 V1 0.006922905 0.010507692 0 0.005817399 0.010330529 V2 0.010507692 0.023428918 0 0.008273152 0.010735568 V3 0.000000000 0.000000000 0 0.000000000 0.000000000 V4 0.005817399 0.008273152 0 0.004929843 0.009048759 V5 0.010330529 0.010735568 0 0.009048759 0.018683544 Array2DRowRealMatrix{ {0.013445532,0.01039469,0.009881156,0.010499559}, {0.01039469,0.023006616,0.008196856,0.010732709}, {0.009881156,0.008196856,0.019023866,0.009210099}, {0.010499559,0.010732709,0.009210099,0.019107243}} > cov(data3) V1 V2 V3 V4 V1 0.013445047 0.010478862 0.009955904 0.010529542 V2 0.010478862 0.022910522 0.008610113 0.011046353 V3 0.009955904 0.008610113 0.019250975 0.009464442 V4 0.010529542 0.011046353 0.009464442 0.019260317 I've traced this back to the RectangularCholeskyDecomposition, which does not seem to handle the second matrix very well (decompositions in the same order as the matrices above): CorrelatedRandomVectorGenerator.getRootMatrix() = Array2DRowRealMatrix{{0.0,0.0,0.0,0.0,0.0},{0.0759577418122063,0.0876125188474239,0.0,0.0,0.0},{0.07764443622513505,0.05132821221460752,0.11976381821791235,0.0,0.0},{0.06662930527909404,0.05501661744114585,0.0016662506519307997,0.10749324207653632,0.0},{0.13822895138139477,0.0,0.0,0.0,0.0}} CorrelatedRandomVectorGenerator.getRank() = 5 CorrelatedRandomVectorGenerator.getRootMatrix() = Array2DRowRealMatrix{{0.0759577418122063,0.034512751379448724,0.0},{0.07764443622513505,0.13029949164628746,0.0},{0.0,0.0,0.0},{0.06662930527909404,0.023203936694855674,0.0},{0.13822895138139477,0.0,0.0}} CorrelatedRandomVectorGenerator.getRank() = 3 CorrelatedRandomVectorGenerator.getRootMatrix() = Array2DRowRealMatrix{{0.0759577418122063,0.034512751379448724,0.033913748226348225,0.07303890149947785},{0.07764443622513505,0.13029949164628746,0.0,0.0},{0.06662930527909404,0.023203936694855674,0.11851573313229945,0.0},{0.13822895138139477,0.0,0.0,0.0}} CorrelatedRandomVectorGenerator.getRank() = 4 Clearly, the rank of each of these matrices should be 4. The first matrix does not lead to incorrect results, but the second one does. Unfortunately, I don't know enough about the Cholesky decomposition to find the flaw in the implementation, and I could not find documentation for the "rectangular" variant (also not at the links provided in the javadoc).
public RectangularCholeskyDecomposition(RealMatrix matrix, double small) throws NonPositiveDefiniteMatrixException { final int order = matrix.getRowDimension(); final double[][] c = matrix.getData(); final double[][] b = new double[order][order]; int[] swap = new int[order]; int[] index = new int[order]; for (int i = 0; i < order; ++i) { index[i] = i; } int r = 0; for (boolean loop = true; loop;) { // find maximal diagonal element swap[r] = r; for (int i = r + 1; i < order; ++i) { int ii = index[i]; int isi = index[swap[i]]; if (c[ii][ii] > c[isi][isi]) { swap[r] = i; } } // swap elements if (swap[r] != r) { int tmp = index[r]; index[r] = index[swap[r]]; index[swap[r]] = tmp; } // check diagonal element int ir = index[r]; if (c[ir][ir] < small) { if (r == 0) { throw new NonPositiveDefiniteMatrixException(c[ir][ir], ir, small); } // check remaining diagonal elements for (int i = r; i < order; ++i) { if (c[index[i]][index[i]] < -small) { // there is at least one sufficiently negative diagonal element, // the symmetric positive semidefinite matrix is wrong throw new NonPositiveDefiniteMatrixException(c[index[i]][index[i]], i, small); } } // all remaining diagonal elements are close to zero, we consider we have // found the rank of the symmetric positive semidefinite matrix ++r; loop = false; } else { // transform the matrix final double sqrt = FastMath.sqrt(c[ir][ir]); b[r][r] = sqrt; final double inverse = 1 / sqrt; for (int i = r + 1; i < order; ++i) { final int ii = index[i]; final double e = inverse * c[ii][ir]; b[i][r] = e; c[ii][ii] -= e * e; for (int j = r + 1; j < i; ++j) { final int ij = index[j]; final double f = c[ii][ij] - e * b[j][r]; c[ii][ij] = f; c[ij][ii] = f; } } // prepare next iteration loop = ++r < order; } } // build the root matrix rank = r; root = MatrixUtils.createRealMatrix(order, r); for (int i = 0; i < order; ++i) { for (int j = 0; j < r; ++j) { root.setEntry(index[i], j, b[i][j]); } } }
public RectangularCholeskyDecomposition ( RealMatrix matrix , double small ) throws NonPositiveDefiniteMatrixException { final int order = matrix . getRowDimension ( ) ; final double [ ] [ ] c = matrix . getData ( ) ; final double [ ] [ ] b = new double [ order ] [ order ] ; int [ ] swap = new int [ order ] ; int [ ] index = new int [ order ] ; for ( int i = 0 ; i < order ; ++ i ) { index [ i ] = i ; } int r = 0 ; for ( boolean loop = true ; loop ; ) { swap [ r ] = r ; for ( int i = r + 1 ; i < order ; ++ i ) { int ii = index [ i ] ; int isi = index [ swap [ i ] ] ; if ( c [ ii ] [ ii ] > c [ isi ] [ isi ] ) { swap [ r ] = i ; } } if ( swap [ r ] != r ) { int tmp = index [ r ] ; index [ r ] = index [ swap [ r ] ] ; index [ swap [ r ] ] = tmp ; } int ir = index [ r ] ; if ( c [ ir ] [ ir ] < small ) { if ( r == 0 ) { throw new NonPositiveDefiniteMatrixException ( c [ ir ] [ ir ] , ir , small ) ; } for ( int i = r ; i < order ; ++ i ) { if ( c [ index [ i ] ] [ index [ i ] ] < - small ) { throw new NonPositiveDefiniteMatrixException ( c [ index [ i ] ] [ index [ i ] ] , i , small ) ; } } ++ r ; loop = false ; } else { final double sqrt = FastMath . sqrt ( c [ ir ] [ ir ] ) ; b [ r ] [ r ] = sqrt ; final double inverse = 1 / sqrt ; for ( int i = r + 1 ; i < order ; ++ i ) { final int ii = index [ i ] ; final double e = inverse * c [ ii ] [ ir ] ; b [ i ] [ r ] = e ; c [ ii ] [ ii ] -= e * e ; for ( int j = r + 1 ; j < i ; ++ j ) { final int ij = index [ j ] ; final double f = c [ ii ] [ ij ] - e * b [ j ] [ r ] ; c [ ii ] [ ij ] = f ; c [ ij ] [ ii ] = f ; } } loop = ++ r < order ; } } rank = r ; root = MatrixUtils . createRealMatrix ( order , r ) ; for ( int i = 0 ; i < order ; ++ i ) { for ( int j = 0 ; j < r ; ++ j ) { root . setEntry ( index [ i ] , j , b [ i ] [ j ] ) ; } } }
public RectangularCholeskyDecomposition(RealMatrix matrix, double small) throws NonPositiveDefiniteMatrixException { final int order = matrix.getRowDimension(); final double[][] c = matrix.getData(); final double[][] b = new double[order][order]; int[] index = new int[order]; for (int i = 0; i < order; ++i) { index[i] = i; } int r = 0; for (boolean loop = true; loop;) { // find maximal diagonal element int swapR = r; for (int i = r + 1; i < order; ++i) { int ii = index[i]; int isr = index[swapR]; if (c[ii][ii] > c[isr][isr]) { swapR = i; } } // swap elements if (swapR != r) { final int tmpIndex = index[r]; index[r] = index[swapR]; index[swapR] = tmpIndex; final double[] tmpRow = b[r]; b[r] = b[swapR]; b[swapR] = tmpRow; } // check diagonal element int ir = index[r]; if (c[ir][ir] < small) { if (r == 0) { throw new NonPositiveDefiniteMatrixException(c[ir][ir], ir, small); } // check remaining diagonal elements for (int i = r; i < order; ++i) { if (c[index[i]][index[i]] < -small) { // there is at least one sufficiently negative diagonal element, // the symmetric positive semidefinite matrix is wrong throw new NonPositiveDefiniteMatrixException(c[index[i]][index[i]], i, small); } } // all remaining diagonal elements are close to zero, we consider we have // found the rank of the symmetric positive semidefinite matrix ++r; loop = false; } else { // transform the matrix final double sqrt = FastMath.sqrt(c[ir][ir]); b[r][r] = sqrt; final double inverse = 1 / sqrt; final double inverse2 = 1 / c[ir][ir]; for (int i = r + 1; i < order; ++i) { final int ii = index[i]; final double e = inverse * c[ii][ir]; b[i][r] = e; c[ii][ii] -= c[ii][ir] * c[ii][ir] * inverse2; for (int j = r + 1; j < i; ++j) { final int ij = index[j]; final double f = c[ii][ij] - e * b[j][r]; c[ii][ij] = f; c[ij][ii] = f; } } // prepare next iteration loop = ++r < order; } } // build the root matrix rank = r; root = MatrixUtils.createRealMatrix(order, r); for (int i = 0; i < order; ++i) { for (int j = 0; j < r; ++j) { root.setEntry(index[i], j, b[i][j]); } } }
public RectangularCholeskyDecomposition ( RealMatrix matrix , double small ) throws NonPositiveDefiniteMatrixException { final int order = matrix . getRowDimension ( ) ; final double [ ] [ ] c = matrix . getData ( ) ; final double [ ] [ ] b = new double [ order ] [ order ] ; int [ ] index = new int [ order ] ; for ( int i = 0 ; i < order ; ++ i ) { index [ i ] = i ; } int r = 0 ; for ( boolean loop = true ; loop ; ) { int swapR = r ; for ( int i = r + 1 ; i < order ; ++ i ) { int ii = index [ i ] ; int isr = index [ swapR ] ; if ( c [ ii ] [ ii ] > c [ isr ] [ isr ] ) { swapR = i ; } } if ( swapR != r ) { final int tmpIndex = index [ r ] ; index [ r ] = index [ swapR ] ; index [ swapR ] = tmpIndex ; final double [ ] tmpRow = b [ r ] ; b [ r ] = b [ swapR ] ; b [ swapR ] = tmpRow ; } int ir = index [ r ] ; if ( c [ ir ] [ ir ] < small ) { if ( r == 0 ) { throw new NonPositiveDefiniteMatrixException ( c [ ir ] [ ir ] , ir , small ) ; } for ( int i = r ; i < order ; ++ i ) { if ( c [ index [ i ] ] [ index [ i ] ] < - small ) { throw new NonPositiveDefiniteMatrixException ( c [ index [ i ] ] [ index [ i ] ] , i , small ) ; } } ++ r ; loop = false ; } else { final double sqrt = FastMath . sqrt ( c [ ir ] [ ir ] ) ; b [ r ] [ r ] = sqrt ; final double inverse = 1 / sqrt ; final double inverse2 = 1 / c [ ir ] [ ir ] ; for ( int i = r + 1 ; i < order ; ++ i ) { final int ii = index [ i ] ; final double e = inverse * c [ ii ] [ ir ] ; b [ i ] [ r ] = e ; c [ ii ] [ ii ] -= c [ ii ] [ ir ] * c [ ii ] [ ir ] * inverse2 ; for ( int j = r + 1 ; j < i ; ++ j ) { final int ij = index [ j ] ; final double f = c [ ii ] [ ij ] - e * b [ j ] [ r ] ; c [ ii ] [ ij ] = f ; c [ ij ] [ ii ] = f ; } } loop = ++ r < order ; } } rank = r ; root = MatrixUtils . createRealMatrix ( order , r ) ; for ( int i = 0 ; i < order ; ++ i ) { for ( int j = 0 ; j < r ; ++ j ) { root . setEntry ( index [ i ] , j , b [ i ] [ j ] ) ; } } }
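A sketch using the reporter's second matrix, assuming commons-math3 on the classpath: after the fix, the decomposition of the PSD matrix with a zero row/column reports rank 4 and reproduces the input.
```java
import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.linear.RectangularCholeskyDecomposition;

public class ZeroRowDemo {
    public static void main(String[] args) {
        RealMatrix m = new Array2DRowRealMatrix(new double[][] {
            { 0.013445532, 0.010394690, 0.0, 0.009881156, 0.010499559 },
            { 0.010394690, 0.023006616, 0.0, 0.008196856, 0.010732709 },
            { 0.0,         0.0,         0.0, 0.0,         0.0         },
            { 0.009881156, 0.008196856, 0.0, 0.019023866, 0.009210099 },
            { 0.010499559, 0.010732709, 0.0, 0.009210099, 0.019107243 }
        });
        RectangularCholeskyDecomposition d =
                new RectangularCholeskyDecomposition(m, 1.0e-12);
        RealMatrix root = d.getRootMatrix();
        System.out.println("rank = " + d.getRank());                     // 4 with the fix
        System.out.println(root.multiply(root.transpose())
                .subtract(m).getNorm());                                 // ~0
    }
}
```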
JacksonDatabind
44
src/main/java/com/fasterxml/jackson/databind/type/SimpleType.java
121
141
Problem with polymorphic types, losing properties from base type(s)
(background, see: https://github.com/dropwizard/dropwizard/pull/1449) Looks like sub-type resolution may be broken for one particular case: that of using `defaultImpl`. If so, appears like properties from super-types are not properly resolved; guessing this could be follow-up item for #1083 (even sooner than I thought...).
@Override @Deprecated protected JavaType _narrow(Class<?> subclass) { if (_class == subclass) { return this; } // Should we check that there is a sub-class relationship? // 15-Jan-2016, tatu: Almost yes, but there are some complications with // placeholder values (`Void`, `NoClass`), so can not quite do yet. // TODO: fix in 2.8 /* throw new IllegalArgumentException("Class "+subclass.getName()+" not sub-type of " +_class.getName()); */ return new SimpleType(subclass, _bindings, this, _superInterfaces, _valueHandler, _typeHandler, _asStatic); // Otherwise, stitch together the hierarchy. First, super-class // if not found, try a super-interface // should not get here but... }
@ Override @ Deprecated protected JavaType _narrow ( Class < ? > subclass ) { if ( _class == subclass ) { return this ; } return new SimpleType ( subclass , _bindings , this , _superInterfaces , _valueHandler , _typeHandler , _asStatic ) ; }
@Override @Deprecated protected JavaType _narrow(Class<?> subclass) { if (_class == subclass) { return this; } // Should we check that there is a sub-class relationship? // 15-Jan-2016, tatu: Almost yes, but there are some complications with // placeholder values (`Void`, `NoClass`), so can not quite do yet. // TODO: fix in 2.8 if (!_class.isAssignableFrom(subclass)) { /* throw new IllegalArgumentException("Class "+subclass.getName()+" not sub-type of " +_class.getName()); */ return new SimpleType(subclass, _bindings, this, _superInterfaces, _valueHandler, _typeHandler, _asStatic); } // Otherwise, stitch together the hierarchy. First, super-class Class<?> next = subclass.getSuperclass(); if (next == _class) { // straight up parent class? Great. return new SimpleType(subclass, _bindings, this, _superInterfaces, _valueHandler, _typeHandler, _asStatic); } if ((next != null) && _class.isAssignableFrom(next)) { JavaType superb = _narrow(next); return new SimpleType(subclass, _bindings, superb, null, _valueHandler, _typeHandler, _asStatic); } // if not found, try a super-interface Class<?>[] nextI = subclass.getInterfaces(); for (Class<?> iface : nextI) { if (iface == _class) { // directly implemented return new SimpleType(subclass, _bindings, null, new JavaType[] { this }, _valueHandler, _typeHandler, _asStatic); } if (_class.isAssignableFrom(iface)) { // indirect, so recurse JavaType superb = _narrow(iface); return new SimpleType(subclass, _bindings, null, new JavaType[] { superb }, _valueHandler, _typeHandler, _asStatic); } } // should not get here but... throw new IllegalArgumentException("Internal error: Can not resolve sub-type for Class "+subclass.getName()+" to " +_class.getName()); }
@ Override @ Deprecated protected JavaType _narrow ( Class < ? > subclass ) { if ( _class == subclass ) { return this ; } if ( ! _class . isAssignableFrom ( subclass ) ) { return new SimpleType ( subclass , _bindings , this , _superInterfaces , _valueHandler , _typeHandler , _asStatic ) ; } Class < ? > next = subclass . getSuperclass ( ) ; if ( next == _class ) { return new SimpleType ( subclass , _bindings , this , _superInterfaces , _valueHandler , _typeHandler , _asStatic ) ; } if ( ( next != null ) && _class . isAssignableFrom ( next ) ) { JavaType superb = _narrow ( next ) ; return new SimpleType ( subclass , _bindings , superb , null , _valueHandler , _typeHandler , _asStatic ) ; } Class < ? > [ ] nextI = subclass . getInterfaces ( ) ; for ( Class < ? > iface : nextI ) { if ( iface == _class ) { return new SimpleType ( subclass , _bindings , null , new JavaType [ ] { this } , _valueHandler , _typeHandler , _asStatic ) ; } if ( _class . isAssignableFrom ( iface ) ) { JavaType superb = _narrow ( iface ) ; return new SimpleType ( subclass , _bindings , null , new JavaType [ ] { superb } , _valueHandler , _typeHandler , _asStatic ) ; } } throw new IllegalArgumentException ( "Internal error: Can not resolve sub-type for Class " + subclass . getName ( ) + " to " + _class . getName ( ) ) ; }
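A sketch of the user-visible symptom, assuming jackson-databind on the classpath; the Base/Derived names are illustrative, not from the linked dropwizard report:
```java
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.databind.ObjectMapper;

public class DefaultImplDemo {
    @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, defaultImpl = Derived.class)
    public static class Base {
        public int a; // declared on the base type
    }

    public static class Derived extends Base {
        public int b;
    }

    public static void main(String[] args) throws Exception {
        // No type id in the input, so defaultImpl kicks in; with the fix the
        // base-type property 'a' is still bound on the resolved subtype.
        Derived d = (Derived) new ObjectMapper()
                .readValue("{\"a\":1,\"b\":2}", Base.class);
        System.out.println(d.a + " " + d.b); // 1 2
    }
}
```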
JacksonCore
4
src/main/java/com/fasterxml/jackson/core/util/TextBuffer.java
580
588
What is the maximum key length allowed?
I noticed that even in Jackson 2.4, if a JSON key is longer than 262144 bytes, ArrayIndexOutOfBoundsException is thrown from TextBuffer. Below is the stack trace: ``` java.lang.ArrayIndexOutOfBoundsException at java.lang.System.arraycopy(Native Method) at com.fasterxml.jackson.core.util.TextBuffer.expandCurrentSegment(TextBuffer.java:604) at com.fasterxml.jackson.core.json.UTF8StreamJsonParser.addName(UTF8StreamJsonParser.java:2034) at com.fasterxml.jackson.core.json.UTF8StreamJsonParser.findName(UTF8StreamJsonParser.java:1928) at com.fasterxml.jackson.core.json.UTF8StreamJsonParser.parseLongFieldName(UTF8StreamJsonParser.java:1534) at com.fasterxml.jackson.core.json.UTF8StreamJsonParser.parseMediumFieldName(UTF8StreamJsonParser.java:1502) at com.fasterxml.jackson.core.json.UTF8StreamJsonParser._parseFieldName(UTF8StreamJsonParser.java:1437) at com.fasterxml.jackson.core.json.UTF8StreamJsonParser.nextToken(UTF8StreamJsonParser.java:668) ... <below are our code> ... ``` Looking at TextBuffer.expandCurrentSegment(TextBuffer.java:604), once the length of _currentSegment is increased to MAX_SEGMENT_LEN + 1 (262145) bytes, the newLen will stay at MAX_SEGMENT_LEN, which is smaller than len. Therefore System.arraycopy() will fail. I understand it is rare to have a key larger than 262144 bytes, but it would be nice if - Jackson explicitly threw an exception stating that the key is too long. - The maximum key length of 262144 bytes were documented. OR - TextBuffer were updated to support super-long keys. Thanks!
public char[] expandCurrentSegment() { final char[] curr = _currentSegment; // Let's grow by 50% by default final int len = curr.length; // but above intended maximum, slow to increase by 25% int newLen = (len == MAX_SEGMENT_LEN) ? (MAX_SEGMENT_LEN+1) : Math.min(MAX_SEGMENT_LEN, len + (len >> 1)); return (_currentSegment = Arrays.copyOf(curr, newLen)); }
public char [ ] expandCurrentSegment ( ) { final char [ ] curr = _currentSegment ; final int len = curr . length ; int newLen = ( len == MAX_SEGMENT_LEN ) ? ( MAX_SEGMENT_LEN + 1 ) : Math . min ( MAX_SEGMENT_LEN , len + ( len >> 1 ) ) ; return ( _currentSegment = Arrays . copyOf ( curr , newLen ) ) ; }
public char[] expandCurrentSegment() { final char[] curr = _currentSegment; // Let's grow by 50% by default final int len = curr.length; int newLen = len + (len >> 1); // but above intended maximum, slow to increase by 25% if (newLen > MAX_SEGMENT_LEN) { newLen = len + (len >> 2); } return (_currentSegment = Arrays.copyOf(curr, newLen)); }
public char [ ] expandCurrentSegment ( ) { final char [ ] curr = _currentSegment ; final int len = curr . length ; int newLen = len + ( len >> 1 ) ; if ( newLen > MAX_SEGMENT_LEN ) { newLen = len + ( len >> 2 ) ; } return ( _currentSegment = Arrays . copyOf ( curr , newLen ) ) ; }
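A sketch of the long-key case, assuming a jackson-core version with this fix (and predating the much later StreamReadConstraints name-length cap); the key length of 300000 is arbitrary, just past the old 262144-char segment limit:
```java
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;

public class LongKeyDemo {
    public static void main(String[] args) throws Exception {
        StringBuilder sb = new StringBuilder(300_000);
        for (int i = 0; i < 300_000; i++) {
            sb.append('k');
        }
        String json = "{\"" + sb + "\":1}";
        // byte input exercises UTF8StreamJsonParser, as in the reported trace
        JsonParser p = new JsonFactory().createParser(json.getBytes("UTF-8"));
        p.nextToken(); // START_OBJECT
        p.nextToken(); // FIELD_NAME -- used to die in TextBuffer past 262144 chars
        System.out.println(p.getCurrentName().length()); // 300000
        p.close();
    }
}
```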
Mockito
12
src/org/mockito/internal/util/reflection/GenericMaster.java
16
25
@Captor annotation might lead to NPE during autounboxing when capturing primitives
Say I have a @Captor ArgumentCaptor<Double> fooCaptor; I'd like to have it capture primitive double arguments, but the annotation gives me an Object captor. Unfortunately this also fails in a non-obvious way. I get a NullPointerException because fooCaptor.capture() returns an Object, which obviously cannot be unboxed to a double. It might be nice if @Captor had an optional parameter for specifying the class. This would make one say the type twice in these cases (curse you, erasure!), but otherwise one would be forced to remove the @Captor annotation, which I'd personally consider even more ugly.
public Class getGenericType(Field field) { Type generic = field.getGenericType(); if (generic != null && generic instanceof ParameterizedType) { Type actual = ((ParameterizedType) generic).getActualTypeArguments()[0]; return (Class) actual; //in case of nested generics we don't go deep } return Object.class; }
public Class getGenericType ( Field field ) { Type generic = field . getGenericType ( ) ; if ( generic != null && generic instanceof ParameterizedType ) { Type actual = ( ( ParameterizedType ) generic ) . getActualTypeArguments ( ) [ 0 ] ; return ( Class ) actual ; } return Object . class ; }
public Class getGenericType(Field field) { Type generic = field.getGenericType(); if (generic != null && generic instanceof ParameterizedType) { Type actual = ((ParameterizedType) generic).getActualTypeArguments()[0]; if (actual instanceof Class) { return (Class) actual; } else if (actual instanceof ParameterizedType) { //in case of nested generics we don't go deep return (Class) ((ParameterizedType) actual).getRawType(); } } return Object.class; }
public Class getGenericType ( Field field ) { Type generic = field . getGenericType ( ) ; if ( generic != null && generic instanceof ParameterizedType ) { Type actual = ( ( ParameterizedType ) generic ) . getActualTypeArguments ( ) [ 0 ] ; if ( actual instanceof Class ) { return ( Class ) actual ; } else if ( actual instanceof ParameterizedType ) { return ( Class ) ( ( ParameterizedType ) actual ) . getRawType ( ) ; } } return Object . class ; }
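A sketch of the nested-generics case under JUnit 4 + Mockito; before the fix, processing a @Captor field like this failed on the raw-type extraction (and Object-typed captors over primitives could then NPE on unboxing):
```java
import java.util.List;

import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.MockitoAnnotations;

public class NestedGenericCaptorTest {
    // ArgumentCaptor<List<String>>: the first type argument is itself a
    // ParameterizedType, which the old code cast straight to Class
    @Captor ArgumentCaptor<List<String>> listCaptor;

    @Test
    public void captorOverNestedGenericsInitializes() {
        MockitoAnnotations.initMocks(this);
        System.out.println(listCaptor != null); // true: the raw type List was extracted
    }
}
```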
JacksonDatabind
91
src/main/java/com/fasterxml/jackson/databind/deser/DeserializerCache.java
536
546
2.9.2 deserialization regression
There seems to be a regression in the latest 2.9.2 release. Using `org.apache.logging.log4j.core.jackson.Log4jJsonObjectMapper` from `org.apache.logging.log4j:log4j-core:2.9.1` to deserialize the appended JSON object is throwing an exception with 2.9.2 but worked with 2.9.1. `org.apache.logging.log4j.core.jackson.Log4jYamlObjectMapper` and `org.apache.logging.log4j.core.jackson.Log4jXmlObjectMapper` fail in similar ways. ### inputString ```json { "timeMillis" : 1493121664118, "thread" : "main", "threadId" : 1, "threadPriority" : 5, "level" : "INFO", "loggerName" : "HelloWorld", "marker" : { "name" : "child", "parents" : [ { "name" : "parent", "parents" : [ { "name" : "grandparent" } ] } ] }, "message" : "Hello, world!", "thrown" : { "commonElementCount" : 0, "message" : "error message", "name" : "java.lang.RuntimeException", "extendedStackTrace" : [ { "class" : "logtest.Main", "method" : "main", "file" : "Main.java", "line" : 29, "exact" : true, "location" : "classes/", "version" : "?" } ] }, "contextStack" : [ "one", "two" ], "loggerFqcn" : "org.apache.logging.log4j.spi.AbstractLogger", "endOfBatch" : false, "contextMap" : { "bar" : "BAR", "foo" : "FOO" }, "source" : { "class" : "logtest.Main", "method" : "main", "file" : "Main.java", "line" : 29 } } ``` ### Exception ``` org.apache.logging.log4j.core.parser.ParseException: com.fasterxml.jackson.databind.exc.MismatchedInputException: Cannot construct instance of `org.apache.logging.log4j.Level` (although at least one Creator exists): no String-argument constructor/factory method to deserialize from String value ('INFO') at [Source: (byte[])"{ "timeMillis" : 1493121664118, "thread" : "main", "threadId" : 1, "threadPriority" : 5, "level" : "INFO", "loggerName" : "HelloWorld", "marker" : { "name" : "child", "parents" : [ { "name" : "parent", "parents" : [ { "name" : "grandparent" } ] } ] }, "message" : "Hello, world!", "thrown" : { "commonElementCount" : 0, "message" : "error message", "name" : "java.lang.RuntimeException", "extendedStackTrace" : [ { "clas"[truncated 482 bytes]; line: 6, column: 13] (through reference chain: org.apache.logging.log4j.core.impl.Log4jLogEvent["level"]) ``` ### parsing pseudo code ```java import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.core.parser.LogEventParser; import org.apache.logging.log4j.core.parser.JsonLogEventParser; import java.nio.charset.StandardCharsets; LogEventParser parser = new JsonLogEventParser(); LogEvent result = parser.parseFrom(inputString.getBytes(StandardCharsets.UTF_8)); assert result != null; ```
private boolean _hasCustomHandlers(JavaType t) { if (t.isContainerType()) { // First: value types may have both value and type handlers JavaType ct = t.getContentType(); if (ct != null) { return (ct.getValueHandler() != null) || (ct.getTypeHandler() != null); // Second: map(-like) types may have value handler for key (but not type; keys are untyped) } } return false; }
private boolean _hasCustomHandlers ( JavaType t ) { if ( t . isContainerType ( ) ) { JavaType ct = t . getContentType ( ) ; if ( ct != null ) { return ( ct . getValueHandler ( ) != null ) || ( ct . getTypeHandler ( ) != null ) ; } } return false ; }
private boolean _hasCustomHandlers(JavaType t) { if (t.isContainerType()) { // First: value types may have both value and type handlers JavaType ct = t.getContentType(); if (ct != null) { if ((ct.getValueHandler() != null) || (ct.getTypeHandler() != null)) { return true; } } // Second: map(-like) types may have value handler for key (but not type; keys are untyped) if (t.isMapLikeType()) { JavaType kt = t.getKeyType(); if (kt.getValueHandler() != null) { return true; } } } return false; }
private boolean _hasCustomHandlers ( JavaType t ) { if ( t . isContainerType ( ) ) { JavaType ct = t . getContentType ( ) ; if ( ct != null ) { if ( ( ct . getValueHandler ( ) != null ) || ( ct . getTypeHandler ( ) != null ) ) { return true ; } } if ( t . isMapLikeType ( ) ) { JavaType kt = t . getKeyType ( ) ; if ( kt . getValueHandler ( ) != null ) { return true ; } } } return false ; }
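A sketch of the regression's shape, assuming jackson-databind on the classpath (the key deserializer here is invented for illustration): a Map property whose key carries a custom handler must not share a cached deserializer with an identical plain Map type.
```java
import java.io.IOException;
import java.util.Map;

import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.KeyDeserializer;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;

public class MapKeyHandlerDemo {
    public static class UpperKeyDeserializer extends KeyDeserializer {
        @Override
        public Object deserializeKey(String key, DeserializationContext ctxt)
                throws IOException {
            return key.toUpperCase();
        }
    }

    public static class Wrapper {
        @JsonDeserialize(keyUsing = UpperKeyDeserializer.class)
        public Map<String, String> custom;
        public Map<String, String> plain;
    }

    public static void main(String[] args) throws Exception {
        Wrapper w = new ObjectMapper().readValue(
                "{\"custom\":{\"a\":\"1\"},\"plain\":{\"a\":\"1\"}}",
                Wrapper.class);
        System.out.println(w.custom); // {A=1} -- key handler applied
        System.out.println(w.plain);  // {a=1} -- unaffected by caching
    }
}
```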
Compress
14
src/main/java/org/apache/commons/compress/archivers/tar/TarUtils.java
56
112
Tar files created by AIX native tar, and which contain symlinks, cannot be read by TarArchiveInputStream
A simple tar file created on AIX using the native ({{/usr/bin/tar}} tar utility) *and* which contains a symbolic link, cannot be loaded by TarArchiveInputStream: {noformat} java.io.IOException: Error detected parsing the header at org.apache.commons.compress.archivers.tar.TarArchiveInputStream.getNextTarEntry(TarArchiveInputStream.java:201) at Extractor.extract(Extractor.java:13) at Extractor.main(Extractor.java:28) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25) at java.lang.reflect.Method.invoke(Method.java:597) at org.apache.tools.ant.taskdefs.ExecuteJava.run(ExecuteJava.java:217) at org.apache.tools.ant.taskdefs.ExecuteJava.execute(ExecuteJava.java:152) at org.apache.tools.ant.taskdefs.Java.run(Java.java:771) at org.apache.tools.ant.taskdefs.Java.executeJava(Java.java:221) at org.apache.tools.ant.taskdefs.Java.executeJava(Java.java:135) at org.apache.tools.ant.taskdefs.Java.execute(Java.java:108) at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:291) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25) at java.lang.reflect.Method.invoke(Method.java:597) at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106) at org.apache.tools.ant.Task.perform(Task.java:348) at org.apache.tools.ant.Target.execute(Target.java:390) at org.apache.tools.ant.Target.performTasks(Target.java:411) at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1399) at org.apache.tools.ant.Project.executeTarget(Project.java:1368) at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41) at org.apache.tools.ant.Project.executeTargets(Project.java:1251) at org.apache.tools.ant.Main.runBuild(Main.java:809) at org.apache.tools.ant.Main.startAnt(Main.java:217) at org.apache.tools.ant.launch.Launcher.run(Launcher.java:280) at org.apache.tools.ant.launch.Launcher.main(Launcher.java:109) Caused by: java.lang.IllegalArgumentException: Invalid byte 0 at offset 0 in '{NUL}1722000726 ' len=12 at org.apache.commons.compress.archivers.tar.TarUtils.parseOctal(TarUtils.java:99) at org.apache.commons.compress.archivers.tar.TarArchiveEntry.parseTarHeader(TarArchiveEntry.java:819) at org.apache.commons.compress.archivers.tar.TarArchiveEntry.<init>(TarArchiveEntry.java:314) at org.apache.commons.compress.archivers.tar.TarArchiveInputStream.getNextTarEntry(TarArchiveInputStream.java:199) ... 29 more {noformat} Tested with 1.2 and the 1.4 nightly build from Feb 23 ({{Implementation-Build: trunk@r1292625; 2012-02-23 03:20:30+0000}})
public static long parseOctal(final byte[] buffer, final int offset, final int length) { long result = 0; int end = offset + length; int start = offset; if (length < 2){ throw new IllegalArgumentException("Length "+length+" must be at least 2"); } boolean allNUL = true; for (int i = start; i < end; i++){ if (buffer[i] != 0){ allNUL = false; break; } } if (allNUL) { return 0L; } // Skip leading spaces while (start < end){ if (buffer[start] == ' '){ start++; } else { break; } } // Must have trailing NUL or space byte trailer; trailer = buffer[end-1]; if (trailer == 0 || trailer == ' '){ end--; } else { throw new IllegalArgumentException( exceptionMessage(buffer, offset, length, end-1, trailer)); } // May have additional NUL or space trailer = buffer[end-1]; if (trailer == 0 || trailer == ' '){ end--; } for ( ;start < end; start++) { final byte currentByte = buffer[start]; // CheckStyle:MagicNumber OFF if (currentByte < '0' || currentByte > '7'){ throw new IllegalArgumentException( exceptionMessage(buffer, offset, length, start, currentByte)); } result = (result << 3) + (currentByte - '0'); // convert from ASCII // CheckStyle:MagicNumber ON } return result; }
public static long parseOctal ( final byte [ ] buffer , final int offset , final int length ) { long result = 0 ; int end = offset + length ; int start = offset ; if ( length < 2 ) { throw new IllegalArgumentException ( "Length " + length + " must be at least 2" ) ; } boolean allNUL = true ; for ( int i = start ; i < end ; i ++ ) { if ( buffer [ i ] != 0 ) { allNUL = false ; break ; } } if ( allNUL ) { return 0L ; } while ( start < end ) { if ( buffer [ start ] == ' ' ) { start ++ ; } else { break ; } } byte trailer ; trailer = buffer [ end - 1 ] ; if ( trailer == 0 || trailer == ' ' ) { end -- ; } else { throw new IllegalArgumentException ( exceptionMessage ( buffer , offset , length , end - 1 , trailer ) ) ; } trailer = buffer [ end - 1 ] ; if ( trailer == 0 || trailer == ' ' ) { end -- ; } for ( ; start < end ; start ++ ) { final byte currentByte = buffer [ start ] ; if ( currentByte < '0' || currentByte > '7' ) { throw new IllegalArgumentException ( exceptionMessage ( buffer , offset , length , start , currentByte ) ) ; } result = ( result << 3 ) + ( currentByte - '0' ) ; } return result ; }
public static long parseOctal(final byte[] buffer, final int offset, final int length) { long result = 0; int end = offset + length; int start = offset; if (length < 2){ throw new IllegalArgumentException("Length "+length+" must be at least 2"); } if (buffer[start] == 0) { return 0L; } // Skip leading spaces while (start < end){ if (buffer[start] == ' '){ start++; } else { break; } } // Must have trailing NUL or space byte trailer; trailer = buffer[end-1]; if (trailer == 0 || trailer == ' '){ end--; } else { throw new IllegalArgumentException( exceptionMessage(buffer, offset, length, end-1, trailer)); } // May have additional NUL or space trailer = buffer[end-1]; if (trailer == 0 || trailer == ' '){ end--; } for ( ;start < end; start++) { final byte currentByte = buffer[start]; // CheckStyle:MagicNumber OFF if (currentByte < '0' || currentByte > '7'){ throw new IllegalArgumentException( exceptionMessage(buffer, offset, length, start, currentByte)); } result = (result << 3) + (currentByte - '0'); // convert from ASCII // CheckStyle:MagicNumber ON } return result; }
public static long parseOctal ( final byte [ ] buffer , final int offset , final int length ) { long result = 0 ; int end = offset + length ; int start = offset ; if ( length < 2 ) { throw new IllegalArgumentException ( "Length " + length + " must be at least 2" ) ; } if ( buffer [ start ] == 0 ) { return 0L ; } while ( start < end ) { if ( buffer [ start ] == ' ' ) { start ++ ; } else { break ; } } byte trailer ; trailer = buffer [ end - 1 ] ; if ( trailer == 0 || trailer == ' ' ) { end -- ; } else { throw new IllegalArgumentException ( exceptionMessage ( buffer , offset , length , end - 1 , trailer ) ) ; } trailer = buffer [ end - 1 ] ; if ( trailer == 0 || trailer == ' ' ) { end -- ; } for ( ; start < end ; start ++ ) { final byte currentByte = buffer [ start ] ; if ( currentByte < '0' || currentByte > '7' ) { throw new IllegalArgumentException ( exceptionMessage ( buffer , offset , length , start , currentByte ) ) ; } result = ( result << 3 ) + ( currentByte - '0' ) ; } return result ; }
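A minimal JUnit sketch of the fixed behavior, exercising the public TarUtils.parseOctal directly with the field value quoted in the report ('{NUL}1722000726 ', len=12); after the fix, a field whose first byte is NUL parses as 0 instead of throwing:
{code}
import static org.junit.Assert.assertEquals;

import java.nio.charset.Charset;

import org.apache.commons.compress.archivers.tar.TarUtils;
import org.junit.Test;

public class ParseOctalLeadingNulTest {

    @Test
    public void leadingNulParsesAsZero() {
        // mirrors the failing header field from the report: NUL + "1722000726 "
        byte[] field = "01722000726 ".getBytes(Charset.forName("US-ASCII"));
        field[0] = 0; // first byte is NUL, as written by the AIX tar
        assertEquals(0L, TarUtils.parseOctal(field, 0, field.length));
    }
}
{code}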
Csv
11
src/main/java/org/apache/commons/csv/CSVParser.java
359
394
NullPointerException when empty header string and null string of ""
When setting the format to have a nullString of "" and having an empty header value, a NullPointerException is thrown.
private Map<String, Integer> initializeHeader() throws IOException { Map<String, Integer> hdrMap = null; final String[] formatHeader = this.format.getHeader(); if (formatHeader != null) { hdrMap = new LinkedHashMap<String, Integer>(); String[] headerRecord = null; if (formatHeader.length == 0) { // read the header from the first line of the file final CSVRecord nextRecord = this.nextRecord(); if (nextRecord != null) { headerRecord = nextRecord.values(); } } else { if (this.format.getSkipHeaderRecord()) { this.nextRecord(); } headerRecord = formatHeader; } // build the name to index mappings if (headerRecord != null) { for (int i = 0; i < headerRecord.length; i++) { final String header = headerRecord[i]; final boolean containsHeader = hdrMap.containsKey(header); final boolean emptyHeader = header.trim().isEmpty(); if (containsHeader && (!emptyHeader || (emptyHeader && !this.format.getIgnoreEmptyHeaders()))) { throw new IllegalArgumentException("The header contains a duplicate name: \"" + header + "\" in " + Arrays.toString(headerRecord)); } hdrMap.put(header, Integer.valueOf(i)); } } } return hdrMap; }
private Map < String , Integer > initializeHeader ( ) throws IOException { Map < String , Integer > hdrMap = null ; final String [ ] formatHeader = this . format . getHeader ( ) ; if ( formatHeader != null ) { hdrMap = new LinkedHashMap < String , Integer > ( ) ; String [ ] headerRecord = null ; if ( formatHeader . length == 0 ) { final CSVRecord nextRecord = this . nextRecord ( ) ; if ( nextRecord != null ) { headerRecord = nextRecord . values ( ) ; } } else { if ( this . format . getSkipHeaderRecord ( ) ) { this . nextRecord ( ) ; } headerRecord = formatHeader ; } if ( headerRecord != null ) { for ( int i = 0 ; i < headerRecord . length ; i ++ ) { final String header = headerRecord [ i ] ; final boolean containsHeader = hdrMap . containsKey ( header ) ; final boolean emptyHeader = header . trim ( ) . isEmpty ( ) ; if ( containsHeader && ( ! emptyHeader || ( emptyHeader && ! this . format . getIgnoreEmptyHeaders ( ) ) ) ) { throw new IllegalArgumentException ( "The header contains a duplicate name: \"" + header + "\" in " + Arrays . toString ( headerRecord ) ) ; } hdrMap . put ( header , Integer . valueOf ( i ) ) ; } } } return hdrMap ; }
private Map<String, Integer> initializeHeader() throws IOException { Map<String, Integer> hdrMap = null; final String[] formatHeader = this.format.getHeader(); if (formatHeader != null) { hdrMap = new LinkedHashMap<String, Integer>(); String[] headerRecord = null; if (formatHeader.length == 0) { // read the header from the first line of the file final CSVRecord nextRecord = this.nextRecord(); if (nextRecord != null) { headerRecord = nextRecord.values(); } } else { if (this.format.getSkipHeaderRecord()) { this.nextRecord(); } headerRecord = formatHeader; } // build the name to index mappings if (headerRecord != null) { for (int i = 0; i < headerRecord.length; i++) { final String header = headerRecord[i]; final boolean containsHeader = hdrMap.containsKey(header); final boolean emptyHeader = header == null || header.trim().isEmpty(); if (containsHeader && (!emptyHeader || (emptyHeader && !this.format.getIgnoreEmptyHeaders()))) { throw new IllegalArgumentException("The header contains a duplicate name: \"" + header + "\" in " + Arrays.toString(headerRecord)); } hdrMap.put(header, Integer.valueOf(i)); } } } return hdrMap; }
private Map < String , Integer > initializeHeader ( ) throws IOException { Map < String , Integer > hdrMap = null ; final String [ ] formatHeader = this . format . getHeader ( ) ; if ( formatHeader != null ) { hdrMap = new LinkedHashMap < String , Integer > ( ) ; String [ ] headerRecord = null ; if ( formatHeader . length == 0 ) { final CSVRecord nextRecord = this . nextRecord ( ) ; if ( nextRecord != null ) { headerRecord = nextRecord . values ( ) ; } } else { if ( this . format . getSkipHeaderRecord ( ) ) { this . nextRecord ( ) ; } headerRecord = formatHeader ; } if ( headerRecord != null ) { for ( int i = 0 ; i < headerRecord . length ; i ++ ) { final String header = headerRecord [ i ] ; final boolean containsHeader = hdrMap . containsKey ( header ) ; final boolean emptyHeader = header == null || header . trim ( ) . isEmpty ( ) ; if ( containsHeader && ( ! emptyHeader || ( emptyHeader && ! this . format . getIgnoreEmptyHeaders ( ) ) ) ) { throw new IllegalArgumentException ( "The header contains a duplicate name: \"" + header + "\" in " + Arrays . toString ( headerRecord ) ) ; } hdrMap . put ( header , Integer . valueOf ( i ) ) ; } } } return hdrMap ; }
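A short reproduction sketch, assuming the pre-1.0 API visible in this snapshot (CSVFormat.DEFAULT.withHeader().withNullString("") and the CSVParser(Reader, CSVFormat) constructor); the trailing comma makes the last header value empty, the nullString "" turns it into null, and header.trim() then threw the NullPointerException before the fix added the null check:
{code}
import java.io.StringReader;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;

public class EmptyHeaderNullStringDemo {
    public static void main(String[] args) throws Exception {
        CSVFormat format = CSVFormat.DEFAULT.withHeader().withNullString("");
        // header row "one,two," has an empty third name; before the fix the
        // constructor threw an NPE while initializing the header mapping
        CSVParser parser = new CSVParser(new StringReader("one,two,\n1,2,3"), format);
        System.out.println(parser.getRecords());
        parser.close();
    }
}
{code}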
Csv
2
src/main/java/org/apache/commons/csv/CSVRecord.java
79
86
CSVRecord does not verify that the length of the header mapping matches the number of values
CSVRecord does not verify that the size of the header mapping matches the number of values. The following test will produce an ArrayIndexOutOfBoundsException: {code} @Test public void testInvalidHeaderTooLong() throws Exception { final CSVParser parser = new CSVParser("a,b", CSVFormat.newBuilder().withHeader("A", "B", "C").build()); final CSVRecord record = parser.iterator().next(); record.get("C"); } {code}
public String get(final String name) { if (mapping == null) { throw new IllegalStateException( "No header mapping was specified, the record values can't be accessed by name"); } final Integer index = mapping.get(name); return index != null ? values[index.intValue()] : null; }
public String get ( final String name ) { if ( mapping == null ) { throw new IllegalStateException ( "No header mapping was specified, the record values can't be accessed by name" ) ; } final Integer index = mapping . get ( name ) ; return index != null ? values [ index . intValue ( ) ] : null ; }
public String get(final String name) { if (mapping == null) { throw new IllegalStateException( "No header mapping was specified, the record values can't be accessed by name"); } final Integer index = mapping.get(name); try { return index != null ? values[index.intValue()] : null; } catch (ArrayIndexOutOfBoundsException e) { throw new IllegalArgumentException( String.format( "Index for header '%s' is %d but CSVRecord only has %d values!", name, index.intValue(), values.length)); } }
public String get ( final String name ) { if ( mapping == null ) { throw new IllegalStateException ( "No header mapping was specified, the record values can't be accessed by name" ) ; } final Integer index = mapping . get ( name ) ; try { return index != null ? values [ index . intValue ( ) ] : null ; } catch ( ArrayIndexOutOfBoundsException e ) { throw new IllegalArgumentException ( String . format ( "Index for header '%s' is %d but CSVRecord only has %d values!" , name , index . intValue ( ) , values . length ) ) ; } }
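After the fix, the same lookup fails with a descriptive IllegalArgumentException rather than a raw ArrayIndexOutOfBoundsException; a sketch reusing the API shape from the report's test:
{code}
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class HeaderTooLongDemo {
    public static void main(String[] args) throws Exception {
        final CSVParser parser = new CSVParser("a,b",
                CSVFormat.newBuilder().withHeader("A", "B", "C").build());
        final CSVRecord record = parser.iterator().next();
        try {
            record.get("C"); // header maps to index 2, but only 2 values exist
        } catch (IllegalArgumentException e) {
            // e.g. "Index for header 'C' is 2 but CSVRecord only has 2 values!"
            System.err.println(e.getMessage());
        }
    }
}
{code}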
JacksonXml
4
src/main/java/com/fasterxml/jackson/dataformat/xml/ser/XmlSerializerProvider.java
200
208
`XmlSerializerProvider` does not use `withRootName` config for null
In `jackson-dataformat-xml/src/main/java/com/fasterxml/jackson/dataformat/xml/ser/XmlSerializerProvider.java` at line 203, I think `_rootNameFromConfig()` should be used if available instead of `ROOT_NAME_FOR_NULL`, so that the `withRootName()` config can be used. I don't know whether or how deserialization would be affected: https://github.com/FasterXML/jackson-dataformat-xml/blob/ca1c671c419e88a18357d497ec3671c73c37452e/src/main/java/com/fasterxml/jackson/dataformat/xml/ser/XmlSerializerProvider.java#L203
protected void _serializeXmlNull(JsonGenerator jgen) throws IOException { // 14-Nov-2016, tatu: As per [dataformat-xml#213], we may have explicitly // configured root name... if (jgen instanceof ToXmlGenerator) { _initWithRootName((ToXmlGenerator) jgen, ROOT_NAME_FOR_NULL); } super.serializeValue(jgen, null); }
protected void _serializeXmlNull ( JsonGenerator jgen ) throws IOException { if ( jgen instanceof ToXmlGenerator ) { _initWithRootName ( ( ToXmlGenerator ) jgen , ROOT_NAME_FOR_NULL ) ; } super . serializeValue ( jgen , null ) ; }
protected void _serializeXmlNull(JsonGenerator jgen) throws IOException { // 14-Nov-2016, tatu: As per [dataformat-xml#213], we may have explicitly // configured root name... QName rootName = _rootNameFromConfig(); if (rootName == null) { rootName = ROOT_NAME_FOR_NULL; } if (jgen instanceof ToXmlGenerator) { _initWithRootName((ToXmlGenerator) jgen, rootName); } super.serializeValue(jgen, null); }
protected void _serializeXmlNull ( JsonGenerator jgen ) throws IOException { QName rootName = _rootNameFromConfig ( ) ; if ( rootName == null ) { rootName = ROOT_NAME_FOR_NULL ; } if ( jgen instanceof ToXmlGenerator ) { _initWithRootName ( ( ToXmlGenerator ) jgen , rootName ) ; } super . serializeValue ( jgen , null ) ; }
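A sketch of the behavior the fix enables, assuming a plain XmlMapper with a per-call root name; before the fix, the configured name was ignored for null values in favor of ROOT_NAME_FOR_NULL:
{code}
import com.fasterxml.jackson.dataformat.xml.XmlMapper;

public class NullRootNameDemo {
    public static void main(String[] args) throws Exception {
        XmlMapper mapper = new XmlMapper();
        // serializing null now honors the configured root name
        String xml = mapper.writer().withRootName("result").writeValueAsString(null);
        System.out.println(xml); // expected after the fix: <result/>
    }
}
{code}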
Codec
7
src/java/org/apache/commons/codec/binary/Base64.java
669
671
Base64.encodeBase64String() shouldn't chunk
Base64.encodeBase64String() shouldn't chunk. Change this: {code} public static String encodeBase64String(byte[] binaryData) { return StringUtils.newStringUtf8(encodeBase64(binaryData, true)); } {code} To this: {code} public static String encodeBase64String(byte[] binaryData) { return StringUtils.newStringUtf8(encodeBase64(binaryData, false)); } {code} This will fix the following tests, which ggregory added a few minutes ago: //assertEquals("Zg==", Base64.encodeBase64String(StringUtils.getBytesUtf8("f"))); //assertEquals("Zm8=", Base64.encodeBase64String(StringUtils.getBytesUtf8("fo"))); //assertEquals("Zm9v", Base64.encodeBase64String(StringUtils.getBytesUtf8("foo"))); //assertEquals("Zm9vYg==", Base64.encodeBase64String(StringUtils.getBytesUtf8("foob"))); //assertEquals("Zm9vYmE=", Base64.encodeBase64String(StringUtils.getBytesUtf8("fooba"))); //assertEquals("Zm9vYmFy", Base64.encodeBase64String(StringUtils.getBytesUtf8("foobar")));
public static String encodeBase64String(byte[] binaryData) { return StringUtils.newStringUtf8(encodeBase64(binaryData, true)); }
public static String encodeBase64String ( byte [ ] binaryData ) { return StringUtils . newStringUtf8 ( encodeBase64 ( binaryData , true ) ) ; }
public static String encodeBase64String(byte[] binaryData) { return StringUtils.newStringUtf8(encodeBase64(binaryData, false)); }
public static String encodeBase64String ( byte [ ] binaryData ) { return StringUtils . newStringUtf8 ( encodeBase64 ( binaryData , false ) ) ; }
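Callers that relied on the old (chunked) output of encodeBase64String can switch to the dedicated chunked variant; a minimal sketch, assuming Base64.encodeBase64Chunked remains available alongside the fixed method:
{code}
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.codec.binary.StringUtils;

public class EncodeDemo {
    public static void main(String[] args) {
        byte[] data = StringUtils.getBytesUtf8("foobar");
        // after the fix: single line, no trailing CRLF
        System.out.println(Base64.encodeBase64String(data));            // Zm9vYmFy
        // explicit 76-character chunking with CRLF line endings
        System.out.print(new String(Base64.encodeBase64Chunked(data))); // Zm9vYmFy\r\n
    }
}
{code}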
Compress
34
src/main/java/org/apache/commons/compress/archivers/zip/X7875
145
147
Exception in X7875_NewUnix.parseFromLocalFileData when parsing 0-sized "ux" local entry
When trying to detect the content type of a zip file with Tika 1.10 (which uses Commons Compress 1.9 internally) in a manner like this: {code} byte[] content = ... // whole zip file. String name = "TR_01.ZIP"; Tika tika = new Tika(); return tika.detect(content, name); {code} it throws an exception: {code} java.lang.ArrayIndexOutOfBoundsException: 13 at org.apache.commons.compress.archivers.zip.X7875_NewUnix.parseFromLocalFileData(X7875_NewUnix.java:199) at org.apache.commons.compress.archivers.zip.X7875_NewUnix.parseFromCentralDirectoryData(X7875_NewUnix.java:220) at org.apache.commons.compress.archivers.zip.ExtraFieldUtils.parse(ExtraFieldUtils.java:174) at org.apache.commons.compress.archivers.zip.ZipArchiveEntry.setCentralDirectoryExtra(ZipArchiveEntry.java:476) at org.apache.commons.compress.archivers.zip.ZipFile.readCentralDirectoryEntry(ZipFile.java:575) at org.apache.commons.compress.archivers.zip.ZipFile.populateFromCentralDirectory(ZipFile.java:492) at org.apache.commons.compress.archivers.zip.ZipFile.<init>(ZipFile.java:216) at org.apache.commons.compress.archivers.zip.ZipFile.<init>(ZipFile.java:192) at org.apache.commons.compress.archivers.zip.ZipFile.<init>(ZipFile.java:153) at org.apache.tika.parser.pkg.ZipContainerDetector.detectZipFormat(ZipContainerDetector.java:141) at org.apache.tika.parser.pkg.ZipContainerDetector.detect(ZipContainerDetector.java:88) at org.apache.tika.detect.CompositeDetector.detect(CompositeDetector.java:77) at org.apache.tika.Tika.detect(Tika.java:155) at org.apache.tika.Tika.detect(Tika.java:183) at org.apache.tika.Tika.detect(Tika.java:223) {code} The zip file does contain two .jpg images and is not a "special" (JAR, Openoffice, ... ) zip file. Unfortunately, the contents of the zip file are confidential and so I cannot attach it to this ticket as it is, although I can provide the parameters supplied to org.apache.commons.compress.archivers.zip.X7875_NewUnix.parseFromLocalFileData(X7875_NewUnix.java:199) as caught by the debugger: {code} data = {byte[13]@2103} 0 = 85 1 = 84 2 = 5 3 = 0 4 = 7 5 = -112 6 = -108 7 = 51 8 = 85 9 = 117 10 = 120 11 = 0 12 = 0 offset = 13 length = 0 {code} This data comes from the local zip entry for the first file; it seems the method tries to read more bytes than are actually available in the buffer. It seems that the first 9 bytes of the buffer are the 'UT' extended field with a timestamp, followed by a 0-sized 'ux' field (bytes 9-12) that is supposed to contain UID/GID - according to infozip's doc the 0-size is common for the central directory, but the local header should contain complete data. In this case, for some reason, it does contain 0-sized data. Note that 7zip and unzip can unzip the file without even a warning, so Commons Compress should also be able to handle that file correctly without choking on that exception.
public ZipShort getCentralDirectoryLength() { return getLocalFileDataLength(); }
public ZipShort getCentralDirectoryLength ( ) { return getLocalFileDataLength ( ) ; }
public ZipShort getCentralDirectoryLength() { return ZERO; }
public ZipShort getCentralDirectoryLength ( ) { return ZERO ; }
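The effect of the change is that the extra field now advertises an empty central-directory payload, matching archives (like the reported one) whose central 'ux' entry carries no uid/gid data; a minimal sketch of the new contract, assuming ZERO is a ZipShort of value 0:
{code}
import org.apache.commons.compress.archivers.zip.X7875_NewUnix;

public class CentralDirLengthDemo {
    public static void main(String[] args) {
        X7875_NewUnix field = new X7875_NewUnix();
        // the local-file entry still carries version + uid + gid bytes
        System.out.println(field.getLocalFileDataLength().getValue());
        // the central-directory entry is now declared empty
        System.out.println(field.getCentralDirectoryLength().getValue()); // 0
    }
}
{code}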
JacksonCore
8
src/main/java/com/fasterxml/jackson/core/util/TextBuffer.java
298
310
Inconsistent TextBuffer#getTextBuffer behavior
Hi, I'm using 2.4.2. While working on CBORParser, I noticed that CBORParser#getTextCharacters() sometimes returns `null` and sometimes `[]` (an empty array) when parsing the empty string `""`. While debugging, I noticed that TextBuffer#getTextBuffer behaves inconsistently. ``` TextBuffer buffer = new TextBuffer(new BufferRecycler()); buffer.resetWithEmpty(); buffer.getTextBuffer(); // returns null buffer.contentsAsString(); // returns empty string "" buffer.getTextBuffer(); // returns empty array [] ``` I think getTextBuffer should return the same value both times. Not sure which (`null` or `[]`) is expected though.
public char[] getTextBuffer() { // Are we just using shared input buffer? if (_inputStart >= 0) return _inputBuffer; if (_resultArray != null) return _resultArray; if (_resultString != null) { return (_resultArray = _resultString.toCharArray()); } // Nope; but does it fit in just one segment? if (!_hasSegments) return _currentSegment; // Nope, need to have/create a non-segmented array and return it return contentsAsArray(); }
public char [ ] getTextBuffer ( ) { if ( _inputStart >= 0 ) return _inputBuffer ; if ( _resultArray != null ) return _resultArray ; if ( _resultString != null ) { return ( _resultArray = _resultString . toCharArray ( ) ) ; } if ( ! _hasSegments ) return _currentSegment ; return contentsAsArray ( ) ; }
public char[] getTextBuffer() { // Are we just using shared input buffer? if (_inputStart >= 0) return _inputBuffer; if (_resultArray != null) return _resultArray; if (_resultString != null) { return (_resultArray = _resultString.toCharArray()); } // Nope; but does it fit in just one segment? if (!_hasSegments && _currentSegment != null) return _currentSegment; // Nope, need to have/create a non-segmented array and return it return contentsAsArray(); }
public char [ ] getTextBuffer ( ) { if ( _inputStart >= 0 ) return _inputBuffer ; if ( _resultArray != null ) return _resultArray ; if ( _resultString != null ) { return ( _resultArray = _resultString . toCharArray ( ) ) ; } if ( ! _hasSegments && _currentSegment != null ) return _currentSegment ; return contentsAsArray ( ) ; }
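With the added _currentSegment null check, the reporter's first call now falls through to contentsAsArray() and returns an empty array, matching the post-contentsAsString() behavior; a sketch of the fixed sequence:
{code}
import com.fasterxml.jackson.core.util.BufferRecycler;
import com.fasterxml.jackson.core.util.TextBuffer;

public class TextBufferDemo {
    public static void main(String[] args) {
        TextBuffer buffer = new TextBuffer(new BufferRecycler());
        buffer.resetWithEmpty();
        System.out.println(buffer.getTextBuffer().length); // 0, no longer null
        buffer.contentsAsString();
        System.out.println(buffer.getTextBuffer().length); // 0, consistent
    }
}
{code}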
Math
95
src/java/org/apache/commons/math/distribution/FDistributionImpl.java
143
149
denominatorDegreeOfFreedom in FDistribution leads to IllegalArgumentException in UnivariateRealSolverUtils.bracket
We are using the FDistributionImpl from the commons.math project to do some statistical calculations, namely receiving the upper and lower boundaries of a confidence interval. Everything is working fine and the results are matching our reference calculations. However, the FDistribution behaves strangely if a denominatorDegreeOfFreedom of 2 is used with an alpha-value of 0.95. This results in an IllegalArgumentException, stating: Invalid endpoint parameters: lowerBound=0.0 initial=Infinity upperBound=1.7976931348623157E308 coming from org.apache.commons.math.analysis.UnivariateRealSolverUtils.bracket. The problem is the 'initial' parameter to that function, which is POSITIVE_INFINITY and therefore not within the boundaries. I have already pinned down the problem to FDistribution's getInitialDomain() method, which goes like: return getDenominatorDegreesOfFreedom() / (getDenominatorDegreesOfFreedom() - 2.0); Obviously, in the case of denominatorDegreesOfFreedom == 2, this must lead to a division by zero, resulting in POSITIVE_INFINITY. The result of this operation is then passed directly into the UnivariateRealSolverUtils.bracket() method as the second argument.
protected double getInitialDomain(double p) { double ret; double d = getDenominatorDegreesOfFreedom(); // use mean ret = d / (d - 2.0); return ret; }
protected double getInitialDomain ( double p ) { double ret ; double d = getDenominatorDegreesOfFreedom ( ) ; ret = d / ( d - 2.0 ) ; return ret ; }
protected double getInitialDomain(double p) { double ret = 1.0; double d = getDenominatorDegreesOfFreedom(); if (d > 2.0) { // use mean ret = d / (d - 2.0); } return ret; }
protected double getInitialDomain ( double p ) { double ret = 1.0 ; double d = getDenominatorDegreesOfFreedom ( ) ; if ( d > 2.0 ) { ret = d / ( d - 2.0 ) ; } return ret ; }
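A worked check of the failure mode: with denominatorDegreesOfFreedom d = 2, the old initial domain was d / (d - 2.0) = 2 / 0 = POSITIVE_INFINITY, which bracket() rejects; the fix only uses the mean when d > 2 and otherwise falls back to 1.0. A sketch against the commons-math 1.x-era API (constructor and method names as they appear in this record):
{code}
import org.apache.commons.math.distribution.FDistributionImpl;

public class FDistributionDemo {
    public static void main(String[] args) throws Exception {
        // denominator degrees of freedom == 2 used to push an infinite
        // initial guess into UnivariateRealSolverUtils.bracket()
        FDistributionImpl f = new FDistributionImpl(5.0, 2.0);
        System.out.println(f.inverseCumulativeProbability(0.95)); // finite after the fix
    }
}
{code}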
Mockito
32
src/org/mockito/internal/configuration/SpyAnnotationEngine.java
27
58
Field name not used to identify @Spy objects
Spy objects created with the @Spy annotation are not named after the field name, as mocks created with @Mock are. I'd like to have this feature in order to use @InjectMocks with two spy objects of the same type.
@SuppressWarnings("deprecation") public void process(Class<?> context, Object testClass) { Field[] fields = context.getDeclaredFields(); for (Field field : fields) { if (field.isAnnotationPresent(Spy.class)) { assertNoAnnotations(Spy.class, field, Mock.class, org.mockito.MockitoAnnotations.Mock.class, Captor.class); boolean wasAccessible = field.isAccessible(); field.setAccessible(true); try { Object instance = field.get(testClass); if (instance == null) { throw new MockitoException("Cannot create a @Spy for '" + field.getName() + "' field because the *instance* is missing\n" + "The instance must be created *before* initMocks();\n" + "Example of correct usage of @Spy:\n" + " @Spy List mock = new LinkedList();\n" + " //also, don't forget about MockitoAnnotations.initMocks();"); } if (new MockUtil().isMock(instance)) { // instance has been spied earlier Mockito.reset(instance); } else { field.set(testClass, Mockito.spy(instance)); } } catch (IllegalAccessException e) { throw new MockitoException("Problems initiating spied field " + field.getName(), e); } finally { field.setAccessible(wasAccessible); } } } }
@ SuppressWarnings ( "deprecation" ) public void process ( Class < ? > context , Object testClass ) { Field [ ] fields = context . getDeclaredFields ( ) ; for ( Field field : fields ) { if ( field . isAnnotationPresent ( Spy . class ) ) { assertNoAnnotations ( Spy . class , field , Mock . class , org . mockito . MockitoAnnotations . Mock . class , Captor . class ) ; boolean wasAccessible = field . isAccessible ( ) ; field . setAccessible ( true ) ; try { Object instance = field . get ( testClass ) ; if ( instance == null ) { throw new MockitoException ( "Cannot create a @Spy for '" + field . getName ( ) + "' field because the *instance* is missing\n" + "The instance must be created *before* initMocks();\n" + "Example of correct usage of @Spy:\n" + " @Spy List mock = new LinkedList();\n" + " //also, don't forget about MockitoAnnotations.initMocks();" ) ; } if ( new MockUtil ( ) . isMock ( instance ) ) { Mockito . reset ( instance ) ; } else { field . set ( testClass , Mockito . spy ( instance ) ) ; } } catch ( IllegalAccessException e ) { throw new MockitoException ( "Problems initiating spied field " + field . getName ( ) , e ) ; } finally { field . setAccessible ( wasAccessible ) ; } } } }
@SuppressWarnings("deprecation") public void process(Class<?> context, Object testClass) { Field[] fields = context.getDeclaredFields(); for (Field field : fields) { if (field.isAnnotationPresent(Spy.class)) { assertNoAnnotations(Spy.class, field, Mock.class, org.mockito.MockitoAnnotations.Mock.class, Captor.class); boolean wasAccessible = field.isAccessible(); field.setAccessible(true); try { Object instance = field.get(testClass); if (instance == null) { throw new MockitoException("Cannot create a @Spy for '" + field.getName() + "' field because the *instance* is missing\n" + "The instance must be created *before* initMocks();\n" + "Example of correct usage of @Spy:\n" + " @Spy List mock = new LinkedList();\n" + " //also, don't forget about MockitoAnnotations.initMocks();"); } if (new MockUtil().isMock(instance)) { // instance has been spied earlier Mockito.reset(instance); } else { field.set(testClass, Mockito.mock(instance.getClass(), withSettings() .spiedInstance(instance) .defaultAnswer(Mockito.CALLS_REAL_METHODS) .name(field.getName()))); } } catch (IllegalAccessException e) { throw new MockitoException("Problems initiating spied field " + field.getName(), e); } finally { field.setAccessible(wasAccessible); } } } }
@ SuppressWarnings ( "deprecation" ) public void process ( Class < ? > context , Object testClass ) { Field [ ] fields = context . getDeclaredFields ( ) ; for ( Field field : fields ) { if ( field . isAnnotationPresent ( Spy . class ) ) { assertNoAnnotations ( Spy . class , field , Mock . class , org . mockito . MockitoAnnotations . Mock . class , Captor . class ) ; boolean wasAccessible = field . isAccessible ( ) ; field . setAccessible ( true ) ; try { Object instance = field . get ( testClass ) ; if ( instance == null ) { throw new MockitoException ( "Cannot create a @Spy for '" + field . getName ( ) + "' field because the *instance* is missing\n" + "The instance must be created *before* initMocks();\n" + "Example of correct usage of @Spy:\n" + " @Spy List mock = new LinkedList();\n" + " //also, don't forget about MockitoAnnotations.initMocks();" ) ; } if ( new MockUtil ( ) . isMock ( instance ) ) { Mockito . reset ( instance ) ; } else { field . set ( testClass , Mockito . mock ( instance . getClass ( ) , withSettings ( ) . spiedInstance ( instance ) . defaultAnswer ( Mockito . CALLS_REAL_METHODS ) . name ( field . getName ( ) ) ) ) ; } } catch ( IllegalAccessException e ) { throw new MockitoException ( "Problems initiating spied field " + field . getName ( ) , e ) ; } finally { field . setAccessible ( wasAccessible ) ; } } } }
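What the fix enables: each spy is now created via Mockito.mock(..., withSettings().spiedInstance(...).name(field.getName())), so injection can tell two same-type spies apart by field name. A minimal sketch, with a hypothetical Holder class standing in for the class under test:
{code}
import java.util.LinkedList;
import java.util.List;

import org.mockito.InjectMocks;
import org.mockito.MockitoAnnotations;
import org.mockito.Spy;

public class TwoSpiesByNameDemo {
    // hypothetical class under test with two same-type collaborators
    static class Holder {
        List<String> first;
        List<String> second;
    }

    @Spy List<String> first = new LinkedList<String>();
    @Spy List<String> second = new LinkedList<String>();
    @InjectMocks Holder holder = new Holder();

    public static void main(String[] args) {
        TwoSpiesByNameDemo demo = new TwoSpiesByNameDemo();
        MockitoAnnotations.initMocks(demo);
        // with named spies, 'first' and 'second' land in the matching fields
        System.out.println(demo.holder.first == demo.first);   // true
        System.out.println(demo.holder.second == demo.second); // true
    }
}
{code}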
JacksonDatabind
33
src/main/java/com/fasterxml/jackson/databind/introspect/JacksonAnnotationIntrospector.java
730
755
@JsonUnwrapped is not treated as assuming @JsonProperty("")
See discussion [here](https://groups.google.com/forum/#!topic/jackson-user/QLpWb8YzIoE), but basically `@JsonUnwrapped` on a private field by itself does not currently cause that field to be serialized; you need to add an explicit `@JsonProperty`. You shouldn't have to do that. (The following test currently fails but should pass, though you can make it pass by un-commenting the `@JsonProperty` line. Uses TestNG and AssertJ.) ``` java package com.bakins_bits; import static org.assertj.core.api.Assertions.assertThat; import org.testng.annotations.Test; import com.fasterxml.jackson.annotation.JsonUnwrapped; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; public class TestJsonUnwrappedShouldMakePrivateFieldsSerializable { public static class Inner { public String animal; } public static class Outer { // @JsonProperty @JsonUnwrapped private Inner inner; } @Test public void jsonUnwrapped_should_make_private_fields_serializable() throws JsonProcessingException { // ARRANGE Inner inner = new Inner(); inner.animal = "Zebra"; Outer outer = new Outer(); outer.inner = inner; ObjectMapper sut = new ObjectMapper(); // ACT String actual = sut.writeValueAsString(outer); // ASSERT assertThat(actual).contains("animal"); assertThat(actual).contains("Zebra"); assertThat(actual).doesNotContain("inner"); } } ```
@Override public PropertyName findNameForSerialization(Annotated a) { String name = null; JsonGetter jg = _findAnnotation(a, JsonGetter.class); if (jg != null) { name = jg.value(); } else { JsonProperty pann = _findAnnotation(a, JsonProperty.class); if (pann != null) { name = pann.value(); /* 22-Apr-2014, tatu: Should figure out a better way to do this, but * it's actually bit tricky to do it more efficiently (meta-annotations * add more lookups; AnnotationMap costs etc) */ } else if (_hasAnnotation(a, JsonSerialize.class) || _hasAnnotation(a, JsonView.class) || _hasAnnotation(a, JsonRawValue.class)) { name = ""; } else { return null; } } return PropertyName.construct(name); }
@ Override public PropertyName findNameForSerialization ( Annotated a ) { String name = null ; JsonGetter jg = _findAnnotation ( a , JsonGetter . class ) ; if ( jg != null ) { name = jg . value ( ) ; } else { JsonProperty pann = _findAnnotation ( a , JsonProperty . class ) ; if ( pann != null ) { name = pann . value ( ) ; } else if ( _hasAnnotation ( a , JsonSerialize . class ) || _hasAnnotation ( a , JsonView . class ) || _hasAnnotation ( a , JsonRawValue . class ) ) { name = "" ; } else { return null ; } } return PropertyName . construct ( name ) ; }
@Override public PropertyName findNameForSerialization(Annotated a) { String name = null; JsonGetter jg = _findAnnotation(a, JsonGetter.class); if (jg != null) { name = jg.value(); } else { JsonProperty pann = _findAnnotation(a, JsonProperty.class); if (pann != null) { name = pann.value(); /* 22-Apr-2014, tatu: Should figure out a better way to do this, but * it's actually bit tricky to do it more efficiently (meta-annotations * add more lookups; AnnotationMap costs etc) */ } else if (_hasAnnotation(a, JsonSerialize.class) || _hasAnnotation(a, JsonView.class) || _hasAnnotation(a, JsonRawValue.class) || _hasAnnotation(a, JsonUnwrapped.class) || _hasAnnotation(a, JsonBackReference.class) || _hasAnnotation(a, JsonManagedReference.class)) { name = ""; } else { return null; } } return PropertyName.construct(name); }
@ Override public PropertyName findNameForSerialization ( Annotated a ) { String name = null ; JsonGetter jg = _findAnnotation ( a , JsonGetter . class ) ; if ( jg != null ) { name = jg . value ( ) ; } else { JsonProperty pann = _findAnnotation ( a , JsonProperty . class ) ; if ( pann != null ) { name = pann . value ( ) ; } else if ( _hasAnnotation ( a , JsonSerialize . class ) || _hasAnnotation ( a , JsonView . class ) || _hasAnnotation ( a , JsonRawValue . class ) || _hasAnnotation ( a , JsonUnwrapped . class ) || _hasAnnotation ( a , JsonBackReference . class ) || _hasAnnotation ( a , JsonManagedReference . class ) ) { name = "" ; } else { return null ; } } return PropertyName . construct ( name ) ; }
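With the fix, the reporter's test passes as written (the @JsonProperty line can stay commented out); the added checks give the same implicit-name treatment to @JsonBackReference and @JsonManagedReference as well. A condensed sketch of the now-working case:
{code}
import com.fasterxml.jackson.annotation.JsonUnwrapped;
import com.fasterxml.jackson.databind.ObjectMapper;

public class UnwrappedPrivateFieldDemo {
    public static class Inner { public String animal = "Zebra"; }

    public static class Outer {
        @JsonUnwrapped // now implies @JsonProperty("") on its own
        private Inner inner = new Inner();
    }

    public static void main(String[] args) throws Exception {
        // expected after the fix: {"animal":"Zebra"}
        System.out.println(new ObjectMapper().writeValueAsString(new Outer()));
    }
}
{code}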
Math
56
src/main/java/org/apache/commons/math/util/MultidimensionalCounter.java
216
246
MultidimensionalCounter.getCounts(int) returns wrong array of indices
MultidimensionalCounter counter = new MultidimensionalCounter(2, 4); for (Integer i : counter) { int[] x = counter.getCounts(i); System.out.println(i + " " + Arrays.toString(x)); } Output is: 0 [0, 0] 1 [0, 1] 2 [0, 2] 3 [0, 2] <=== should be [0, 3] 4 [1, 0] 5 [1, 1] 6 [1, 2] 7 [1, 2] <=== should be [1, 3]
public int[] getCounts(int index) { if (index < 0 || index >= totalSize) { throw new OutOfRangeException(index, 0, totalSize); } final int[] indices = new int[dimension]; int count = 0; for (int i = 0; i < last; i++) { int idx = 0; final int offset = uniCounterOffset[i]; while (count <= index) { count += offset; ++idx; } --idx; count -= offset; indices[i] = idx; } int idx = 1; while (count < index) { count += idx; ++idx; } --idx; indices[last] = idx; return indices; }
public int [ ] getCounts ( int index ) { if ( index < 0 || index >= totalSize ) { throw new OutOfRangeException ( index , 0 , totalSize ) ; } final int [ ] indices = new int [ dimension ] ; int count = 0 ; for ( int i = 0 ; i < last ; i ++ ) { int idx = 0 ; final int offset = uniCounterOffset [ i ] ; while ( count <= index ) { count += offset ; ++ idx ; } -- idx ; count -= offset ; indices [ i ] = idx ; } int idx = 1 ; while ( count < index ) { count += idx ; ++ idx ; } -- idx ; indices [ last ] = idx ; return indices ; }
public int[] getCounts(int index) { if (index < 0 || index >= totalSize) { throw new OutOfRangeException(index, 0, totalSize); } final int[] indices = new int[dimension]; int count = 0; for (int i = 0; i < last; i++) { int idx = 0; final int offset = uniCounterOffset[i]; while (count <= index) { count += offset; ++idx; } --idx; count -= offset; indices[i] = idx; } indices[last] = index - count; return indices; }
public int [ ] getCounts ( int index ) { if ( index < 0 || index >= totalSize ) { throw new OutOfRangeException ( index , 0 , totalSize ) ; } final int [ ] indices = new int [ dimension ] ; int count = 0 ; for ( int i = 0 ; i < last ; i ++ ) { int idx = 0 ; final int offset = uniCounterOffset [ i ] ; while ( count <= index ) { count += offset ; ++ idx ; } -- idx ; count -= offset ; indices [ i ] = idx ; } indices [ last ] = index - count ; return indices ; }
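A worked check of the off-by-one: for a (2, 4) counter and index 7, the loop over the first dimension leaves count == 4 and indices[0] == 1, so the fixed code computes the last index directly as 7 - 4 = 3 instead of running the faulty increment loop:
{code}
import java.util.Arrays;

import org.apache.commons.math.util.MultidimensionalCounter;

public class GetCountsDemo {
    public static void main(String[] args) {
        MultidimensionalCounter counter = new MultidimensionalCounter(2, 4);
        // before the fix: [1, 2]; after the fix: [1, 3]
        System.out.println(Arrays.toString(counter.getCounts(7)));
    }
}
{code}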
JacksonDatabind
64
src/main/java/com/fasterxml/jackson/databind/ser/PropertyBuilder.java
89
216
Further issues with `@JsonInclude` with `NON_DEFAULT`
(follow-up to #1351) Looks like there are still cases where a class-level annotation like: ``` @JsonInclude(JsonInclude.Include.NON_DEFAULT) ``` does not work for default `null` value suppression for the `String` type (at least).
@SuppressWarnings("deprecation") protected BeanPropertyWriter buildWriter(SerializerProvider prov, BeanPropertyDefinition propDef, JavaType declaredType, JsonSerializer<?> ser, TypeSerializer typeSer, TypeSerializer contentTypeSer, AnnotatedMember am, boolean defaultUseStaticTyping) throws JsonMappingException { // do we have annotation that forces type to use (to declared type or its super type)? JavaType serializationType; try { serializationType = findSerializationType(am, defaultUseStaticTyping, declaredType); } catch (JsonMappingException e) { return prov.reportBadPropertyDefinition(_beanDesc, propDef, e.getMessage()); } // Container types can have separate type serializers for content (value / element) type if (contentTypeSer != null) { /* 04-Feb-2010, tatu: Let's force static typing for collection, if there is * type information for contents. Should work well (for JAXB case); can be * revisited if this causes problems. */ if (serializationType == null) { // serializationType = TypeFactory.type(am.getGenericType(), _beanDesc.getType()); serializationType = declaredType; } JavaType ct = serializationType.getContentType(); // Not exactly sure why, but this used to occur; better check explicitly: if (ct == null) { prov.reportBadPropertyDefinition(_beanDesc, propDef, "serialization type "+serializationType+" has no content"); } serializationType = serializationType.withContentTypeHandler(contentTypeSer); ct = serializationType.getContentType(); } Object valueToSuppress = null; boolean suppressNulls = false; // 12-Jul-2016, tatu: [databind#1256] Need to make sure we consider type refinement JavaType actualType = (serializationType == null) ? declaredType : serializationType; // 17-Aug-2016, tatu: Default inclusion covers global default (for all types), as well // as type-default for enclosing POJO. What we need, then, is per-type default (if any) // for declared property type... and finally property annotation overrides JsonInclude.Value inclV = _config.getDefaultPropertyInclusion(actualType.getRawClass(), _defaultInclusion); // property annotation override inclV = inclV.withOverrides(propDef.findInclusion()); JsonInclude.Include inclusion = inclV.getValueInclusion(); if (inclusion == JsonInclude.Include.USE_DEFAULTS) { // should not occur but... inclusion = JsonInclude.Include.ALWAYS; } switch (inclusion) { case NON_DEFAULT: // 11-Nov-2015, tatu: This is tricky because semantics differ between cases, // so that if enclosing class has this, we may need to access values of property, // whereas for global defaults OR per-property overrides, we have more // static definition. Sigh. // First: case of class/type specifying it; try to find POJO property defaults // 16-Oct-2016, tatu: Note: if we can not for some reason create "default instance", // revert logic to the case of general/per-property handling, so both // type-default AND null are to be excluded. 
// (as per [databind#1417] if (_useRealPropertyDefaults) { // 07-Sep-2016, tatu: may also need to front-load access forcing now if (prov.isEnabled(MapperFeature.CAN_OVERRIDE_ACCESS_MODIFIERS)) { am.fixAccess(_config.isEnabled(MapperFeature.OVERRIDE_PUBLIC_ACCESS_MODIFIERS)); } valueToSuppress = getPropertyDefaultValue(propDef.getName(), am, actualType); } else { valueToSuppress = getDefaultValue(actualType); suppressNulls = true; } if (valueToSuppress == null) { suppressNulls = true; } else { if (valueToSuppress.getClass().isArray()) { valueToSuppress = ArrayBuilders.getArrayComparator(valueToSuppress); } } break; case NON_ABSENT: // new with 2.6, to support Guava/JDK8 Optionals // always suppress nulls suppressNulls = true; // and for referential types, also "empty", which in their case means "absent" if (actualType.isReferenceType()) { valueToSuppress = BeanPropertyWriter.MARKER_FOR_EMPTY; } break; case NON_EMPTY: // always suppress nulls suppressNulls = true; // but possibly also 'empty' values: valueToSuppress = BeanPropertyWriter.MARKER_FOR_EMPTY; break; case NON_NULL: suppressNulls = true; // fall through case ALWAYS: // default default: // we may still want to suppress empty collections, as per [JACKSON-254]: if (actualType.isContainerType() && !_config.isEnabled(SerializationFeature.WRITE_EMPTY_JSON_ARRAYS)) { valueToSuppress = BeanPropertyWriter.MARKER_FOR_EMPTY; } break; } BeanPropertyWriter bpw = new BeanPropertyWriter(propDef, am, _beanDesc.getClassAnnotations(), declaredType, ser, typeSer, serializationType, suppressNulls, valueToSuppress); // How about custom null serializer? Object serDef = _annotationIntrospector.findNullSerializer(am); if (serDef != null) { bpw.assignNullSerializer(prov.serializerInstance(am, serDef)); } // And then, handling of unwrapping NameTransformer unwrapper = _annotationIntrospector.findUnwrappingNameTransformer(am); if (unwrapper != null) { bpw = bpw.unwrappingWriter(unwrapper); } return bpw; }
@ SuppressWarnings ( "deprecation" ) protected BeanPropertyWriter buildWriter ( SerializerProvider prov , BeanPropertyDefinition propDef , JavaType declaredType , JsonSerializer < ? > ser , TypeSerializer typeSer , TypeSerializer contentTypeSer , AnnotatedMember am , boolean defaultUseStaticTyping ) throws JsonMappingException { JavaType serializationType ; try { serializationType = findSerializationType ( am , defaultUseStaticTyping , declaredType ) ; } catch ( JsonMappingException e ) { return prov . reportBadPropertyDefinition ( _beanDesc , propDef , e . getMessage ( ) ) ; } if ( contentTypeSer != null ) { if ( serializationType == null ) { serializationType = declaredType ; } JavaType ct = serializationType . getContentType ( ) ; if ( ct == null ) { prov . reportBadPropertyDefinition ( _beanDesc , propDef , "serialization type " + serializationType + " has no content" ) ; } serializationType = serializationType . withContentTypeHandler ( contentTypeSer ) ; ct = serializationType . getContentType ( ) ; } Object valueToSuppress = null ; boolean suppressNulls = false ; JavaType actualType = ( serializationType == null ) ? declaredType : serializationType ; JsonInclude . Value inclV = _config . getDefaultPropertyInclusion ( actualType . getRawClass ( ) , _defaultInclusion ) ; inclV = inclV . withOverrides ( propDef . findInclusion ( ) ) ; JsonInclude . Include inclusion = inclV . getValueInclusion ( ) ; if ( inclusion == JsonInclude . Include . USE_DEFAULTS ) { inclusion = JsonInclude . Include . ALWAYS ; } switch ( inclusion ) { case NON_DEFAULT : if ( _useRealPropertyDefaults ) { if ( prov . isEnabled ( MapperFeature . CAN_OVERRIDE_ACCESS_MODIFIERS ) ) { am . fixAccess ( _config . isEnabled ( MapperFeature . OVERRIDE_PUBLIC_ACCESS_MODIFIERS ) ) ; } valueToSuppress = getPropertyDefaultValue ( propDef . getName ( ) , am , actualType ) ; } else { valueToSuppress = getDefaultValue ( actualType ) ; suppressNulls = true ; } if ( valueToSuppress == null ) { suppressNulls = true ; } else { if ( valueToSuppress . getClass ( ) . isArray ( ) ) { valueToSuppress = ArrayBuilders . getArrayComparator ( valueToSuppress ) ; } } break ; case NON_ABSENT : suppressNulls = true ; if ( actualType . isReferenceType ( ) ) { valueToSuppress = BeanPropertyWriter . MARKER_FOR_EMPTY ; } break ; case NON_EMPTY : suppressNulls = true ; valueToSuppress = BeanPropertyWriter . MARKER_FOR_EMPTY ; break ; case NON_NULL : suppressNulls = true ; case ALWAYS : default : if ( actualType . isContainerType ( ) && ! _config . isEnabled ( SerializationFeature . WRITE_EMPTY_JSON_ARRAYS ) ) { valueToSuppress = BeanPropertyWriter . MARKER_FOR_EMPTY ; } break ; } BeanPropertyWriter bpw = new BeanPropertyWriter ( propDef , am , _beanDesc . getClassAnnotations ( ) , declaredType , ser , typeSer , serializationType , suppressNulls , valueToSuppress ) ; Object serDef = _annotationIntrospector . findNullSerializer ( am ) ; if ( serDef != null ) { bpw . assignNullSerializer ( prov . serializerInstance ( am , serDef ) ) ; } NameTransformer unwrapper = _annotationIntrospector . findUnwrappingNameTransformer ( am ) ; if ( unwrapper != null ) { bpw = bpw . unwrappingWriter ( unwrapper ) ; } return bpw ; }
@SuppressWarnings("deprecation") protected BeanPropertyWriter buildWriter(SerializerProvider prov, BeanPropertyDefinition propDef, JavaType declaredType, JsonSerializer<?> ser, TypeSerializer typeSer, TypeSerializer contentTypeSer, AnnotatedMember am, boolean defaultUseStaticTyping) throws JsonMappingException { // do we have annotation that forces type to use (to declared type or its super type)? JavaType serializationType; try { serializationType = findSerializationType(am, defaultUseStaticTyping, declaredType); } catch (JsonMappingException e) { return prov.reportBadPropertyDefinition(_beanDesc, propDef, e.getMessage()); } // Container types can have separate type serializers for content (value / element) type if (contentTypeSer != null) { /* 04-Feb-2010, tatu: Let's force static typing for collection, if there is * type information for contents. Should work well (for JAXB case); can be * revisited if this causes problems. */ if (serializationType == null) { // serializationType = TypeFactory.type(am.getGenericType(), _beanDesc.getType()); serializationType = declaredType; } JavaType ct = serializationType.getContentType(); // Not exactly sure why, but this used to occur; better check explicitly: if (ct == null) { prov.reportBadPropertyDefinition(_beanDesc, propDef, "serialization type "+serializationType+" has no content"); } serializationType = serializationType.withContentTypeHandler(contentTypeSer); ct = serializationType.getContentType(); } Object valueToSuppress = null; boolean suppressNulls = false; // 12-Jul-2016, tatu: [databind#1256] Need to make sure we consider type refinement JavaType actualType = (serializationType == null) ? declaredType : serializationType; // 17-Aug-2016, tatu: Default inclusion covers global default (for all types), as well // as type-default for enclosing POJO. What we need, then, is per-type default (if any) // for declared property type... and finally property annotation overrides JsonInclude.Value inclV = _config.getDefaultPropertyInclusion(actualType.getRawClass(), _defaultInclusion); // property annotation override inclV = inclV.withOverrides(propDef.findInclusion()); JsonInclude.Include inclusion = inclV.getValueInclusion(); if (inclusion == JsonInclude.Include.USE_DEFAULTS) { // should not occur but... inclusion = JsonInclude.Include.ALWAYS; } switch (inclusion) { case NON_DEFAULT: // 11-Nov-2015, tatu: This is tricky because semantics differ between cases, // so that if enclosing class has this, we may need to access values of property, // whereas for global defaults OR per-property overrides, we have more // static definition. Sigh. // First: case of class/type specifying it; try to find POJO property defaults Object defaultBean; // 16-Oct-2016, tatu: Note: if we can not for some reason create "default instance", // revert logic to the case of general/per-property handling, so both // type-default AND null are to be excluded. 
// (as per [databind#1417] if (_useRealPropertyDefaults && (defaultBean = getDefaultBean()) != null) { // 07-Sep-2016, tatu: may also need to front-load access forcing now if (prov.isEnabled(MapperFeature.CAN_OVERRIDE_ACCESS_MODIFIERS)) { am.fixAccess(_config.isEnabled(MapperFeature.OVERRIDE_PUBLIC_ACCESS_MODIFIERS)); } try { valueToSuppress = am.getValue(defaultBean); } catch (Exception e) { _throwWrapped(e, propDef.getName(), defaultBean); } } else { valueToSuppress = getDefaultValue(actualType); suppressNulls = true; } if (valueToSuppress == null) { suppressNulls = true; } else { if (valueToSuppress.getClass().isArray()) { valueToSuppress = ArrayBuilders.getArrayComparator(valueToSuppress); } } break; case NON_ABSENT: // new with 2.6, to support Guava/JDK8 Optionals // always suppress nulls suppressNulls = true; // and for referential types, also "empty", which in their case means "absent" if (actualType.isReferenceType()) { valueToSuppress = BeanPropertyWriter.MARKER_FOR_EMPTY; } break; case NON_EMPTY: // always suppress nulls suppressNulls = true; // but possibly also 'empty' values: valueToSuppress = BeanPropertyWriter.MARKER_FOR_EMPTY; break; case NON_NULL: suppressNulls = true; // fall through case ALWAYS: // default default: // we may still want to suppress empty collections, as per [JACKSON-254]: if (actualType.isContainerType() && !_config.isEnabled(SerializationFeature.WRITE_EMPTY_JSON_ARRAYS)) { valueToSuppress = BeanPropertyWriter.MARKER_FOR_EMPTY; } break; } BeanPropertyWriter bpw = new BeanPropertyWriter(propDef, am, _beanDesc.getClassAnnotations(), declaredType, ser, typeSer, serializationType, suppressNulls, valueToSuppress); // How about custom null serializer? Object serDef = _annotationIntrospector.findNullSerializer(am); if (serDef != null) { bpw.assignNullSerializer(prov.serializerInstance(am, serDef)); } // And then, handling of unwrapping NameTransformer unwrapper = _annotationIntrospector.findUnwrappingNameTransformer(am); if (unwrapper != null) { bpw = bpw.unwrappingWriter(unwrapper); } return bpw; }
@ SuppressWarnings ( "deprecation" ) protected BeanPropertyWriter buildWriter ( SerializerProvider prov , BeanPropertyDefinition propDef , JavaType declaredType , JsonSerializer < ? > ser , TypeSerializer typeSer , TypeSerializer contentTypeSer , AnnotatedMember am , boolean defaultUseStaticTyping ) throws JsonMappingException { JavaType serializationType ; try { serializationType = findSerializationType ( am , defaultUseStaticTyping , declaredType ) ; } catch ( JsonMappingException e ) { return prov . reportBadPropertyDefinition ( _beanDesc , propDef , e . getMessage ( ) ) ; } if ( contentTypeSer != null ) { if ( serializationType == null ) { serializationType = declaredType ; } JavaType ct = serializationType . getContentType ( ) ; if ( ct == null ) { prov . reportBadPropertyDefinition ( _beanDesc , propDef , "serialization type " + serializationType + " has no content" ) ; } serializationType = serializationType . withContentTypeHandler ( contentTypeSer ) ; ct = serializationType . getContentType ( ) ; } Object valueToSuppress = null ; boolean suppressNulls = false ; JavaType actualType = ( serializationType == null ) ? declaredType : serializationType ; JsonInclude . Value inclV = _config . getDefaultPropertyInclusion ( actualType . getRawClass ( ) , _defaultInclusion ) ; inclV = inclV . withOverrides ( propDef . findInclusion ( ) ) ; JsonInclude . Include inclusion = inclV . getValueInclusion ( ) ; if ( inclusion == JsonInclude . Include . USE_DEFAULTS ) { inclusion = JsonInclude . Include . ALWAYS ; } switch ( inclusion ) { case NON_DEFAULT : Object defaultBean ; if ( _useRealPropertyDefaults && ( defaultBean = getDefaultBean ( ) ) != null ) { if ( prov . isEnabled ( MapperFeature . CAN_OVERRIDE_ACCESS_MODIFIERS ) ) { am . fixAccess ( _config . isEnabled ( MapperFeature . OVERRIDE_PUBLIC_ACCESS_MODIFIERS ) ) ; } try { valueToSuppress = am . getValue ( defaultBean ) ; } catch ( Exception e ) { _throwWrapped ( e , propDef . getName ( ) , defaultBean ) ; } } else { valueToSuppress = getDefaultValue ( actualType ) ; suppressNulls = true ; } if ( valueToSuppress == null ) { suppressNulls = true ; } else { if ( valueToSuppress . getClass ( ) . isArray ( ) ) { valueToSuppress = ArrayBuilders . getArrayComparator ( valueToSuppress ) ; } } break ; case NON_ABSENT : suppressNulls = true ; if ( actualType . isReferenceType ( ) ) { valueToSuppress = BeanPropertyWriter . MARKER_FOR_EMPTY ; } break ; case NON_EMPTY : suppressNulls = true ; valueToSuppress = BeanPropertyWriter . MARKER_FOR_EMPTY ; break ; case NON_NULL : suppressNulls = true ; case ALWAYS : default : if ( actualType . isContainerType ( ) && ! _config . isEnabled ( SerializationFeature . WRITE_EMPTY_JSON_ARRAYS ) ) { valueToSuppress = BeanPropertyWriter . MARKER_FOR_EMPTY ; } break ; } BeanPropertyWriter bpw = new BeanPropertyWriter ( propDef , am , _beanDesc . getClassAnnotations ( ) , declaredType , ser , typeSer , serializationType , suppressNulls , valueToSuppress ) ; Object serDef = _annotationIntrospector . findNullSerializer ( am ) ; if ( serDef != null ) { bpw . assignNullSerializer ( prov . serializerInstance ( am , serDef ) ) ; } NameTransformer unwrapper = _annotationIntrospector . findUnwrappingNameTransformer ( am ) ; if ( unwrapper != null ) { bpw = bpw . unwrappingWriter ( unwrapper ) ; } return bpw ; }
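A minimal check of the remaining case from the report, assuming a plain POJO with the class-level annotation; the fix reads each property's default off a freshly constructed "default bean" (am.getValue(defaultBean)), so a null String equal to its default is now suppressed:
{code}
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.ObjectMapper;

public class NonDefaultNullStringDemo {
    @JsonInclude(JsonInclude.Include.NON_DEFAULT)
    public static class Pojo {
        public String name;   // default: null
        public int count = 5; // default: 5
    }

    public static void main(String[] args) throws Exception {
        Pojo p = new Pojo();
        p.count = 7; // differs from the default, so it is written
        // expected after the fix: {"count":7} -- the default null name is suppressed
        System.out.println(new ObjectMapper().writeValueAsString(p));
    }
}
{code}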
Math
40
src/main/java/org/apache/commons/math/analysis/solvers/BracketingNthOrderBrentSolver.java
142
345
BracketingNthOrderBrentSolver exceeds maxIterationCount while always updating the same boundary
In some cases, the aging feature in BracketingNthOrderBrentSolver fails. It attempts to balance the bracketing points by targeting a non-zero value instead of the real root. However, the chosen target is too close to zero, and the inverse polynomial approximation is always on the same side and thus always updates the same bracket. In the real use case, in a large program, I had a bracket point xA = 12500.0, yA = 3.7e-16, agingA = 0, which is the (really good) estimate of the zero on one side of the root, and xB = 12500.03, yB = -7.0e-5, agingB = 97. This shows that the bracketing interval is completely unbalanced, and we never succeed in rebalancing it, as we always update (xA, yA) and never update (xB, yB).
@Override protected double doSolve() { // prepare arrays with the first points final double[] x = new double[maximalOrder + 1]; final double[] y = new double[maximalOrder + 1]; x[0] = getMin(); x[1] = getStartValue(); x[2] = getMax(); verifySequence(x[0], x[1], x[2]); // evaluate initial guess y[1] = computeObjectiveValue(x[1]); if (Precision.equals(y[1], 0.0, 1)) { // return the initial guess if it is a perfect root. return x[1]; } // evaluate first endpoint y[0] = computeObjectiveValue(x[0]); if (Precision.equals(y[0], 0.0, 1)) { // return the first endpoint if it is a perfect root. return x[0]; } int nbPoints; int signChangeIndex; if (y[0] * y[1] < 0) { // reduce interval if it brackets the root nbPoints = 2; signChangeIndex = 1; } else { // evaluate second endpoint y[2] = computeObjectiveValue(x[2]); if (Precision.equals(y[2], 0.0, 1)) { // return the second endpoint if it is a perfect root. return x[2]; } if (y[1] * y[2] < 0) { // use all computed point as a start sampling array for solving nbPoints = 3; signChangeIndex = 2; } else { throw new NoBracketingException(x[0], x[2], y[0], y[2]); } } // prepare a work array for inverse polynomial interpolation final double[] tmpX = new double[x.length]; // current tightest bracketing of the root double xA = x[signChangeIndex - 1]; double yA = y[signChangeIndex - 1]; double absYA = FastMath.abs(yA); int agingA = 0; double xB = x[signChangeIndex]; double yB = y[signChangeIndex]; double absYB = FastMath.abs(yB); int agingB = 0; // search loop while (true) { // check convergence of bracketing interval final double xTol = getAbsoluteAccuracy() + getRelativeAccuracy() * FastMath.max(FastMath.abs(xA), FastMath.abs(xB)); if (((xB - xA) <= xTol) || (FastMath.max(absYA, absYB) < getFunctionValueAccuracy())) { switch (allowed) { case ANY_SIDE : return absYA < absYB ? xA : xB; case LEFT_SIDE : return xA; case RIGHT_SIDE : return xB; case BELOW_SIDE : return (yA <= 0) ? xA : xB; case ABOVE_SIDE : return (yA < 0) ? 
xB : xA; default : // this should never happen throw new MathInternalError(null); } } // target for the next evaluation point double targetY; if (agingA >= MAXIMAL_AGING) { // we keep updating the high bracket, try to compensate this targetY = -REDUCTION_FACTOR * yB; } else if (agingB >= MAXIMAL_AGING) { // we keep updating the low bracket, try to compensate this targetY = -REDUCTION_FACTOR * yA; } else { // bracketing is balanced, try to find the root itself targetY = 0; } // make a few attempts to guess a root, double nextX; int start = 0; int end = nbPoints; do { // guess a value for current target, using inverse polynomial interpolation System.arraycopy(x, start, tmpX, start, end - start); nextX = guessX(targetY, tmpX, y, start, end); if (!((nextX > xA) && (nextX < xB))) { // the guessed root is not strictly inside of the tightest bracketing interval // the guessed root is either not strictly inside the interval or it // is a NaN (which occurs when some sampling points share the same y) // we try again with a lower interpolation order if (signChangeIndex - start >= end - signChangeIndex) { // we have more points before the sign change, drop the lowest point ++start; } else { // we have more points after sign change, drop the highest point --end; } // we need to do one more attempt nextX = Double.NaN; } } while (Double.isNaN(nextX) && (end - start > 1)); if (Double.isNaN(nextX)) { // fall back to bisection nextX = xA + 0.5 * (xB - xA); start = signChangeIndex - 1; end = signChangeIndex; } // evaluate the function at the guessed root final double nextY = computeObjectiveValue(nextX); if (Precision.equals(nextY, 0.0, 1)) { // we have found an exact root, since it is not an approximation // we don't need to bother about the allowed solutions setting return nextX; } if ((nbPoints > 2) && (end - start != nbPoints)) { // we have been forced to ignore some points to keep bracketing, // they are probably too far from the root, drop them from now on nbPoints = end - start; System.arraycopy(x, start, x, 0, nbPoints); System.arraycopy(y, start, y, 0, nbPoints); signChangeIndex -= start; } else if (nbPoints == x.length) { // we have to drop one point in order to insert the new one nbPoints--; // keep the tightest bracketing interval as centered as possible if (signChangeIndex >= (x.length + 1) / 2) { // we drop the lowest point, we have to shift the arrays and the index System.arraycopy(x, 1, x, 0, nbPoints); System.arraycopy(y, 1, y, 0, nbPoints); --signChangeIndex; } } // insert the last computed point //(by construction, we know it lies inside the tightest bracketing interval) System.arraycopy(x, signChangeIndex, x, signChangeIndex + 1, nbPoints - signChangeIndex); x[signChangeIndex] = nextX; System.arraycopy(y, signChangeIndex, y, signChangeIndex + 1, nbPoints - signChangeIndex); y[signChangeIndex] = nextY; ++nbPoints; // update the bracketing interval if (nextY * yA <= 0) { // the sign change occurs before the inserted point xB = nextX; yB = nextY; absYB = FastMath.abs(yB); ++agingA; agingB = 0; } else { // the sign change occurs after the inserted point xA = nextX; yA = nextY; absYA = FastMath.abs(yA); agingA = 0; ++agingB; // update the sign change index signChangeIndex++; } } }
@ Override protected double doSolve ( ) { final double [ ] x = new double [ maximalOrder + 1 ] ; final double [ ] y = new double [ maximalOrder + 1 ] ; x [ 0 ] = getMin ( ) ; x [ 1 ] = getStartValue ( ) ; x [ 2 ] = getMax ( ) ; verifySequence ( x [ 0 ] , x [ 1 ] , x [ 2 ] ) ; y [ 1 ] = computeObjectiveValue ( x [ 1 ] ) ; if ( Precision . equals ( y [ 1 ] , 0.0 , 1 ) ) { return x [ 1 ] ; } y [ 0 ] = computeObjectiveValue ( x [ 0 ] ) ; if ( Precision . equals ( y [ 0 ] , 0.0 , 1 ) ) { return x [ 0 ] ; } int nbPoints ; int signChangeIndex ; if ( y [ 0 ] * y [ 1 ] < 0 ) { nbPoints = 2 ; signChangeIndex = 1 ; } else { y [ 2 ] = computeObjectiveValue ( x [ 2 ] ) ; if ( Precision . equals ( y [ 2 ] , 0.0 , 1 ) ) { return x [ 2 ] ; } if ( y [ 1 ] * y [ 2 ] < 0 ) { nbPoints = 3 ; signChangeIndex = 2 ; } else { throw new NoBracketingException ( x [ 0 ] , x [ 2 ] , y [ 0 ] , y [ 2 ] ) ; } } final double [ ] tmpX = new double [ x . length ] ; double xA = x [ signChangeIndex - 1 ] ; double yA = y [ signChangeIndex - 1 ] ; double absYA = FastMath . abs ( yA ) ; int agingA = 0 ; double xB = x [ signChangeIndex ] ; double yB = y [ signChangeIndex ] ; double absYB = FastMath . abs ( yB ) ; int agingB = 0 ; while ( true ) { final double xTol = getAbsoluteAccuracy ( ) + getRelativeAccuracy ( ) * FastMath . max ( FastMath . abs ( xA ) , FastMath . abs ( xB ) ) ; if ( ( ( xB - xA ) <= xTol ) || ( FastMath . max ( absYA , absYB ) < getFunctionValueAccuracy ( ) ) ) { switch ( allowed ) { case ANY_SIDE : return absYA < absYB ? xA : xB ; case LEFT_SIDE : return xA ; case RIGHT_SIDE : return xB ; case BELOW_SIDE : return ( yA <= 0 ) ? xA : xB ; case ABOVE_SIDE : return ( yA < 0 ) ? xB : xA ; default : throw new MathInternalError ( null ) ; } } double targetY ; if ( agingA >= MAXIMAL_AGING ) { targetY = - REDUCTION_FACTOR * yB ; } else if ( agingB >= MAXIMAL_AGING ) { targetY = - REDUCTION_FACTOR * yA ; } else { targetY = 0 ; } double nextX ; int start = 0 ; int end = nbPoints ; do { System . arraycopy ( x , start , tmpX , start , end - start ) ; nextX = guessX ( targetY , tmpX , y , start , end ) ; if ( ! ( ( nextX > xA ) && ( nextX < xB ) ) ) { if ( signChangeIndex - start >= end - signChangeIndex ) { ++ start ; } else { -- end ; } nextX = Double . NaN ; } } while ( Double . isNaN ( nextX ) && ( end - start > 1 ) ) ; if ( Double . isNaN ( nextX ) ) { nextX = xA + 0.5 * ( xB - xA ) ; start = signChangeIndex - 1 ; end = signChangeIndex ; } final double nextY = computeObjectiveValue ( nextX ) ; if ( Precision . equals ( nextY , 0.0 , 1 ) ) { return nextX ; } if ( ( nbPoints > 2 ) && ( end - start != nbPoints ) ) { nbPoints = end - start ; System . arraycopy ( x , start , x , 0 , nbPoints ) ; System . arraycopy ( y , start , y , 0 , nbPoints ) ; signChangeIndex -= start ; } else if ( nbPoints == x . length ) { nbPoints -- ; if ( signChangeIndex >= ( x . length + 1 ) / 2 ) { System . arraycopy ( x , 1 , x , 0 , nbPoints ) ; System . arraycopy ( y , 1 , y , 0 , nbPoints ) ; -- signChangeIndex ; } } System . arraycopy ( x , signChangeIndex , x , signChangeIndex + 1 , nbPoints - signChangeIndex ) ; x [ signChangeIndex ] = nextX ; System . arraycopy ( y , signChangeIndex , y , signChangeIndex + 1 , nbPoints - signChangeIndex ) ; y [ signChangeIndex ] = nextY ; ++ nbPoints ; if ( nextY * yA <= 0 ) { xB = nextX ; yB = nextY ; absYB = FastMath . abs ( yB ) ; ++ agingA ; agingB = 0 ; } else { xA = nextX ; yA = nextY ; absYA = FastMath . abs ( yA ) ; agingA = 0 ; ++ agingB ; signChangeIndex ++ ; } } }
@Override protected double doSolve() { // prepare arrays with the first points final double[] x = new double[maximalOrder + 1]; final double[] y = new double[maximalOrder + 1]; x[0] = getMin(); x[1] = getStartValue(); x[2] = getMax(); verifySequence(x[0], x[1], x[2]); // evaluate initial guess y[1] = computeObjectiveValue(x[1]); if (Precision.equals(y[1], 0.0, 1)) { // return the initial guess if it is a perfect root. return x[1]; } // evaluate first endpoint y[0] = computeObjectiveValue(x[0]); if (Precision.equals(y[0], 0.0, 1)) { // return the first endpoint if it is a perfect root. return x[0]; } int nbPoints; int signChangeIndex; if (y[0] * y[1] < 0) { // reduce interval if it brackets the root nbPoints = 2; signChangeIndex = 1; } else { // evaluate second endpoint y[2] = computeObjectiveValue(x[2]); if (Precision.equals(y[2], 0.0, 1)) { // return the second endpoint if it is a perfect root. return x[2]; } if (y[1] * y[2] < 0) { // use all computed point as a start sampling array for solving nbPoints = 3; signChangeIndex = 2; } else { throw new NoBracketingException(x[0], x[2], y[0], y[2]); } } // prepare a work array for inverse polynomial interpolation final double[] tmpX = new double[x.length]; // current tightest bracketing of the root double xA = x[signChangeIndex - 1]; double yA = y[signChangeIndex - 1]; double absYA = FastMath.abs(yA); int agingA = 0; double xB = x[signChangeIndex]; double yB = y[signChangeIndex]; double absYB = FastMath.abs(yB); int agingB = 0; // search loop while (true) { // check convergence of bracketing interval final double xTol = getAbsoluteAccuracy() + getRelativeAccuracy() * FastMath.max(FastMath.abs(xA), FastMath.abs(xB)); if (((xB - xA) <= xTol) || (FastMath.max(absYA, absYB) < getFunctionValueAccuracy())) { switch (allowed) { case ANY_SIDE : return absYA < absYB ? xA : xB; case LEFT_SIDE : return xA; case RIGHT_SIDE : return xB; case BELOW_SIDE : return (yA <= 0) ? xA : xB; case ABOVE_SIDE : return (yA < 0) ? 
xB : xA; default : // this should never happen throw new MathInternalError(null); } } // target for the next evaluation point double targetY; if (agingA >= MAXIMAL_AGING) { // we keep updating the high bracket, try to compensate this final int p = agingA - MAXIMAL_AGING; final double weightA = (1 << p) - 1; final double weightB = p + 1; targetY = (weightA * yA - weightB * REDUCTION_FACTOR * yB) / (weightA + weightB); } else if (agingB >= MAXIMAL_AGING) { // we keep updating the low bracket, try to compensate this final int p = agingB - MAXIMAL_AGING; final double weightA = p + 1; final double weightB = (1 << p) - 1; targetY = (weightB * yB - weightA * REDUCTION_FACTOR * yA) / (weightA + weightB); } else { // bracketing is balanced, try to find the root itself targetY = 0; } // make a few attempts to guess a root, double nextX; int start = 0; int end = nbPoints; do { // guess a value for current target, using inverse polynomial interpolation System.arraycopy(x, start, tmpX, start, end - start); nextX = guessX(targetY, tmpX, y, start, end); if (!((nextX > xA) && (nextX < xB))) { // the guessed root is not strictly inside of the tightest bracketing interval // the guessed root is either not strictly inside the interval or it // is a NaN (which occurs when some sampling points share the same y) // we try again with a lower interpolation order if (signChangeIndex - start >= end - signChangeIndex) { // we have more points before the sign change, drop the lowest point ++start; } else { // we have more points after sign change, drop the highest point --end; } // we need to do one more attempt nextX = Double.NaN; } } while (Double.isNaN(nextX) && (end - start > 1)); if (Double.isNaN(nextX)) { // fall back to bisection nextX = xA + 0.5 * (xB - xA); start = signChangeIndex - 1; end = signChangeIndex; } // evaluate the function at the guessed root final double nextY = computeObjectiveValue(nextX); if (Precision.equals(nextY, 0.0, 1)) { // we have found an exact root, since it is not an approximation // we don't need to bother about the allowed solutions setting return nextX; } if ((nbPoints > 2) && (end - start != nbPoints)) { // we have been forced to ignore some points to keep bracketing, // they are probably too far from the root, drop them from now on nbPoints = end - start; System.arraycopy(x, start, x, 0, nbPoints); System.arraycopy(y, start, y, 0, nbPoints); signChangeIndex -= start; } else if (nbPoints == x.length) { // we have to drop one point in order to insert the new one nbPoints--; // keep the tightest bracketing interval as centered as possible if (signChangeIndex >= (x.length + 1) / 2) { // we drop the lowest point, we have to shift the arrays and the index System.arraycopy(x, 1, x, 0, nbPoints); System.arraycopy(y, 1, y, 0, nbPoints); --signChangeIndex; } } // insert the last computed point //(by construction, we know it lies inside the tightest bracketing interval) System.arraycopy(x, signChangeIndex, x, signChangeIndex + 1, nbPoints - signChangeIndex); x[signChangeIndex] = nextX; System.arraycopy(y, signChangeIndex, y, signChangeIndex + 1, nbPoints - signChangeIndex); y[signChangeIndex] = nextY; ++nbPoints; // update the bracketing interval if (nextY * yA <= 0) { // the sign change occurs before the inserted point xB = nextX; yB = nextY; absYB = FastMath.abs(yB); ++agingA; agingB = 0; } else { // the sign change occurs after the inserted point xA = nextX; yA = nextY; absYA = FastMath.abs(yA); agingA = 0; ++agingB; // update the sign change index signChangeIndex++; } } }
@ Override protected double doSolve ( ) { final double [ ] x = new double [ maximalOrder + 1 ] ; final double [ ] y = new double [ maximalOrder + 1 ] ; x [ 0 ] = getMin ( ) ; x [ 1 ] = getStartValue ( ) ; x [ 2 ] = getMax ( ) ; verifySequence ( x [ 0 ] , x [ 1 ] , x [ 2 ] ) ; y [ 1 ] = computeObjectiveValue ( x [ 1 ] ) ; if ( Precision . equals ( y [ 1 ] , 0.0 , 1 ) ) { return x [ 1 ] ; } y [ 0 ] = computeObjectiveValue ( x [ 0 ] ) ; if ( Precision . equals ( y [ 0 ] , 0.0 , 1 ) ) { return x [ 0 ] ; } int nbPoints ; int signChangeIndex ; if ( y [ 0 ] * y [ 1 ] < 0 ) { nbPoints = 2 ; signChangeIndex = 1 ; } else { y [ 2 ] = computeObjectiveValue ( x [ 2 ] ) ; if ( Precision . equals ( y [ 2 ] , 0.0 , 1 ) ) { return x [ 2 ] ; } if ( y [ 1 ] * y [ 2 ] < 0 ) { nbPoints = 3 ; signChangeIndex = 2 ; } else { throw new NoBracketingException ( x [ 0 ] , x [ 2 ] , y [ 0 ] , y [ 2 ] ) ; } } final double [ ] tmpX = new double [ x . length ] ; double xA = x [ signChangeIndex - 1 ] ; double yA = y [ signChangeIndex - 1 ] ; double absYA = FastMath . abs ( yA ) ; int agingA = 0 ; double xB = x [ signChangeIndex ] ; double yB = y [ signChangeIndex ] ; double absYB = FastMath . abs ( yB ) ; int agingB = 0 ; while ( true ) { final double xTol = getAbsoluteAccuracy ( ) + getRelativeAccuracy ( ) * FastMath . max ( FastMath . abs ( xA ) , FastMath . abs ( xB ) ) ; if ( ( ( xB - xA ) <= xTol ) || ( FastMath . max ( absYA , absYB ) < getFunctionValueAccuracy ( ) ) ) { switch ( allowed ) { case ANY_SIDE : return absYA < absYB ? xA : xB ; case LEFT_SIDE : return xA ; case RIGHT_SIDE : return xB ; case BELOW_SIDE : return ( yA <= 0 ) ? xA : xB ; case ABOVE_SIDE : return ( yA < 0 ) ? xB : xA ; default : throw new MathInternalError ( null ) ; } } double targetY ; if ( agingA >= MAXIMAL_AGING ) { final int p = agingA - MAXIMAL_AGING ; final double weightA = ( 1 << p ) - 1 ; final double weightB = p + 1 ; targetY = ( weightA * yA - weightB * REDUCTION_FACTOR * yB ) / ( weightA + weightB ) ; } else if ( agingB >= MAXIMAL_AGING ) { final int p = agingB - MAXIMAL_AGING ; final double weightA = p + 1 ; final double weightB = ( 1 << p ) - 1 ; targetY = ( weightB * yB - weightA * REDUCTION_FACTOR * yA ) / ( weightA + weightB ) ; } else { targetY = 0 ; } double nextX ; int start = 0 ; int end = nbPoints ; do { System . arraycopy ( x , start , tmpX , start , end - start ) ; nextX = guessX ( targetY , tmpX , y , start , end ) ; if ( ! ( ( nextX > xA ) && ( nextX < xB ) ) ) { if ( signChangeIndex - start >= end - signChangeIndex ) { ++ start ; } else { -- end ; } nextX = Double . NaN ; } } while ( Double . isNaN ( nextX ) && ( end - start > 1 ) ) ; if ( Double . isNaN ( nextX ) ) { nextX = xA + 0.5 * ( xB - xA ) ; start = signChangeIndex - 1 ; end = signChangeIndex ; } final double nextY = computeObjectiveValue ( nextX ) ; if ( Precision . equals ( nextY , 0.0 , 1 ) ) { return nextX ; } if ( ( nbPoints > 2 ) && ( end - start != nbPoints ) ) { nbPoints = end - start ; System . arraycopy ( x , start , x , 0 , nbPoints ) ; System . arraycopy ( y , start , y , 0 , nbPoints ) ; signChangeIndex -= start ; } else if ( nbPoints == x . length ) { nbPoints -- ; if ( signChangeIndex >= ( x . length + 1 ) / 2 ) { System . arraycopy ( x , 1 , x , 0 , nbPoints ) ; System . arraycopy ( y , 1 , y , 0 , nbPoints ) ; -- signChangeIndex ; } } System . arraycopy ( x , signChangeIndex , x , signChangeIndex + 1 , nbPoints - signChangeIndex ) ; x [ signChangeIndex ] = nextX ; System . 
arraycopy ( y , signChangeIndex , y , signChangeIndex + 1 , nbPoints - signChangeIndex ) ; y [ signChangeIndex ] = nextY ; ++ nbPoints ; if ( nextY * yA <= 0 ) { xB = nextX ; yB = nextY ; absYB = FastMath . abs ( yB ) ; ++ agingA ; agingB = 0 ; } else { xA = nextX ; yA = nextY ; absYA = FastMath . abs ( yA ) ; agingA = 0 ; ++ agingB ; signChangeIndex ++ ; } } }
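The change in this record swaps the flat compensation targets (-REDUCTION_FACTOR * yB, -REDUCTION_FACTOR * yA) for weighted blends that lean harder on the stale bracket the longer it ages. A minimal standalone sketch of the post-fix weighting — the MAXIMAL_AGING and REDUCTION_FACTOR values below are assumptions chosen for illustration, not read from this record:
```java
public class AgingTargetSketch {
    static final int MAXIMAL_AGING = 2;               // assumed value, for illustration
    static final double REDUCTION_FACTOR = 1.0 / 16;  // assumed value, for illustration

    // Post-fix target when only the high bracket keeps being refreshed (agingA grows):
    // blend yA with a reduced yB, weighting yA more heavily the longer it has been stuck.
    static double targetY(int agingA, double yA, double yB) {
        final int p = agingA - MAXIMAL_AGING;
        final double weightA = (1 << p) - 1;
        final double weightB = p + 1;
        return (weightA * yA - weightB * REDUCTION_FACTOR * yB) / (weightA + weightB);
    }

    public static void main(String[] args) {
        double yA = -1.0, yB = 4.0;
        // p = 0 reproduces the pre-fix target -REDUCTION_FACTOR * yB exactly:
        System.out.println(targetY(2, yA, yB)); // -0.25
        // larger aging pulls the target toward yA instead of staying flat:
        System.out.println(targetY(4, yA, yB)); // -0.625
    }
}
```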
Math
17
src/main/java/org/apache/commons/math3/dfp/Dfp.java
1602
1604
Dfp Dfp.multiply(int x) does not comply with the general contract of FieldElement.multiply(int n)
In class {{org.apache.commons.math3.Dfp}}, the method {{multiply(int n)}} is limited to {{0 <= n <= 9999}}. This is not consistent with the general contract of {{FieldElement.multiply(int n)}}, where there should be no limitation on the values of {{n}}.
public Dfp multiply(final int x) { return multiplyFast(x); }
public Dfp multiply ( final int x ) { return multiplyFast ( x ) ; }
public Dfp multiply(final int x) { if (x >= 0 && x < RADIX) { return multiplyFast(x); } else { return multiply(newInstance(x)); } }
public Dfp multiply ( final int x ) { if ( x >= 0 && x < RADIX ) { return multiplyFast ( x ) ; } else { return multiply ( newInstance ( x ) ) ; } }
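A hypothetical check of the widened contract, assuming the commons-math3 DfpField API (the field precision and test values here are arbitrary): with the fix, multiply(int) accepts any int, falling back to full Dfp multiplication outside the fast 0 <= x < RADIX range.
```java
import org.apache.commons.math3.dfp.Dfp;
import org.apache.commons.math3.dfp.DfpField;

public class DfpMultiplySketch {
    public static void main(String[] args) {
        DfpField field = new DfpField(25); // 25 decimal digits of precision
        Dfp two = field.newDfp(2);
        System.out.println(two.multiply(3));      // fast path: 0 <= x < RADIX
        System.out.println(two.multiply(-7));     // post-fix: routed through multiply(newInstance(-7))
        System.out.println(two.multiply(123456)); // post-fix: also handled via newInstance
    }
}
```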
Compress
18
src/main/java/org/apache/commons/compress/archivers/tar/TarArchiveOutputStream.java
454
491
Long directory names cannot be stored in a tar archive because of an error when writing PAX headers
Trying to add a directory to the TAR Archive that has a name longer than 100 bytes generates an exception with a stack trace similar to the following: {noformat} java.io.IOException: request to write '114' bytes exceeds size in header of '0' bytes for entry './PaxHeaders.X/layers/openstreetmap__osm.disy.net/.tiles/1.0.0/openstreetmap__osm.disy.net/default/' at org.apache.commons.compress.archivers.tar.TarArchiveOutputStream.write(TarArchiveOutputStream.java:385) at java.io.OutputStream.write(Unknown Source) at org.apache.commons.compress.archivers.tar.TarArchiveOutputStream.writePaxHeaders(TarArchiveOutputStream.java:485) at org.apache.commons.compress.archivers.tar.TarArchiveOutputStream.putArchiveEntry(TarArchiveOutputStream.java:312) at net.disy.lib.io.tar.TarUtilities.addFile(TarUtilities.java:116) at net.disy.lib.io.tar.TarUtilities.addDirectory(TarUtilities.java:158) at net.disy.lib.io.tar.TarUtilities.addDirectory(TarUtilities.java:162) at net.disy.lib.io.tar.TarUtilities.addDirectory(TarUtilities.java:162) at net.disy.lib.io.tar.TarUtilities.addDirectory(TarUtilities.java:162) at net.disy.lib.io.tar.TarUtilities.addDirectory(TarUtilities.java:162) at net.disy.lib.io.tar.TarUtilities.addDirectory(TarUtilities.java:162) at net.disy.lib.io.tar.TarUtilities.addDirectory(TarUtilities.java:162) at net.disy.lib.io.tar.TarUtilities.addDirectory(TarUtilities.java:162) at net.disy.lib.io.tar.TarUtilities.addDirectory(TarUtilities.java:162) at net.disy.lib.io.tar.TarUtilities.addDirectory(TarUtilities.java:162) at net.disy.lib.io.tar.TarUtilities.tar(TarUtilities.java:77) at net.disy.lib.io.tar.TarUtilities.tar(TarUtilities.java:42) at net.disy.gisterm.tilecacheset.export.TileCacheSetExporter.tarTreeStructure(TileCacheSetExporter.java:262) at net.disy.gisterm.tilecacheset.export.TileCacheSetExporter.export(TileCacheSetExporter.java:111) at net.disy.gisterm.tilecacheset.desktop.controller.ExportController$1.run(ExportController.java:81) ... 2 more {noformat} Informal source code investigation points to the problem being that for directory entries the code assumes that the length is 0 in putArchiveEntry (see TarArchiveOutputStream:321 ) but when writing the data, it actually writes some data (the filename) and the length written (filename size) is larger than the length expected (0).
void writePaxHeaders(String entryName, Map<String, String> headers) throws IOException { String name = "./PaxHeaders.X/" + stripTo7Bits(entryName); // TarEntry's constructor would think this is a directory // and not allow any data to be written if (name.length() >= TarConstants.NAMELEN) { name = name.substring(0, TarConstants.NAMELEN - 1); } TarArchiveEntry pex = new TarArchiveEntry(name, TarConstants.LF_PAX_EXTENDED_HEADER_LC); StringWriter w = new StringWriter(); for (Map.Entry<String, String> h : headers.entrySet()) { String key = h.getKey(); String value = h.getValue(); int len = key.length() + value.length() + 3 /* blank, equals and newline */ + 2 /* guess 9 < actual length < 100 */; String line = len + " " + key + "=" + value + "\n"; int actualLength = line.getBytes(CharsetNames.UTF_8).length; while (len != actualLength) { // Adjust for cases where length < 10 or > 100 // or where UTF-8 encoding isn't a single octet // per character. // Must be in loop as size may go from 99 to 100 in // first pass so we'd need a second. len = actualLength; line = len + " " + key + "=" + value + "\n"; actualLength = line.getBytes(CharsetNames.UTF_8).length; } w.write(line); } byte[] data = w.toString().getBytes(CharsetNames.UTF_8); pex.setSize(data.length); putArchiveEntry(pex); write(data); closeArchiveEntry(); }
void writePaxHeaders ( String entryName , Map < String , String > headers ) throws IOException { String name = "./PaxHeaders.X/" + stripTo7Bits ( entryName ) ; if ( name . length ( ) >= TarConstants . NAMELEN ) { name = name . substring ( 0 , TarConstants . NAMELEN - 1 ) ; } TarArchiveEntry pex = new TarArchiveEntry ( name , TarConstants . LF_PAX_EXTENDED_HEADER_LC ) ; StringWriter w = new StringWriter ( ) ; for ( Map . Entry < String , String > h : headers . entrySet ( ) ) { String key = h . getKey ( ) ; String value = h . getValue ( ) ; int len = key . length ( ) + value . length ( ) + 3 + 2 ; String line = len + " " + key + "=" + value + "\n" ; int actualLength = line . getBytes ( CharsetNames . UTF_8 ) . length ; while ( len != actualLength ) { len = actualLength ; line = len + " " + key + "=" + value + "\n" ; actualLength = line . getBytes ( CharsetNames . UTF_8 ) . length ; } w . write ( line ) ; } byte [ ] data = w . toString ( ) . getBytes ( CharsetNames . UTF_8 ) ; pex . setSize ( data . length ) ; putArchiveEntry ( pex ) ; write ( data ) ; closeArchiveEntry ( ) ; }
void writePaxHeaders(String entryName, Map<String, String> headers) throws IOException { String name = "./PaxHeaders.X/" + stripTo7Bits(entryName); while (name.endsWith("/")) { // TarEntry's constructor would think this is a directory // and not allow any data to be written name = name.substring(0, name.length() - 1); } if (name.length() >= TarConstants.NAMELEN) { name = name.substring(0, TarConstants.NAMELEN - 1); } TarArchiveEntry pex = new TarArchiveEntry(name, TarConstants.LF_PAX_EXTENDED_HEADER_LC); StringWriter w = new StringWriter(); for (Map.Entry<String, String> h : headers.entrySet()) { String key = h.getKey(); String value = h.getValue(); int len = key.length() + value.length() + 3 /* blank, equals and newline */ + 2 /* guess 9 < actual length < 100 */; String line = len + " " + key + "=" + value + "\n"; int actualLength = line.getBytes(CharsetNames.UTF_8).length; while (len != actualLength) { // Adjust for cases where length < 10 or > 100 // or where UTF-8 encoding isn't a single octet // per character. // Must be in loop as size may go from 99 to 100 in // first pass so we'd need a second. len = actualLength; line = len + " " + key + "=" + value + "\n"; actualLength = line.getBytes(CharsetNames.UTF_8).length; } w.write(line); } byte[] data = w.toString().getBytes(CharsetNames.UTF_8); pex.setSize(data.length); putArchiveEntry(pex); write(data); closeArchiveEntry(); }
void writePaxHeaders ( String entryName , Map < String , String > headers ) throws IOException { String name = "./PaxHeaders.X/" + stripTo7Bits ( entryName ) ; while ( name . endsWith ( "/" ) ) { name = name . substring ( 0 , name . length ( ) - 1 ) ; } if ( name . length ( ) >= TarConstants . NAMELEN ) { name = name . substring ( 0 , TarConstants . NAMELEN - 1 ) ; } TarArchiveEntry pex = new TarArchiveEntry ( name , TarConstants . LF_PAX_EXTENDED_HEADER_LC ) ; StringWriter w = new StringWriter ( ) ; for ( Map . Entry < String , String > h : headers . entrySet ( ) ) { String key = h . getKey ( ) ; String value = h . getValue ( ) ; int len = key . length ( ) + value . length ( ) + 3 + 2 ; String line = len + " " + key + "=" + value + "\n" ; int actualLength = line . getBytes ( CharsetNames . UTF_8 ) . length ; while ( len != actualLength ) { len = actualLength ; line = len + " " + key + "=" + value + "\n" ; actualLength = line . getBytes ( CharsetNames . UTF_8 ) . length ; } w . write ( line ) ; } byte [ ] data = w . toString ( ) . getBytes ( CharsetNames . UTF_8 ) ; pex . setSize ( data . length ) ; putArchiveEntry ( pex ) ; write ( data ) ; closeArchiveEntry ( ) ; }
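The core of the fix is stripping trailing slashes so the PAX pseudo-entry is never mistaken for a directory (a directory-style TarArchiveEntry reports size 0 and rejects written data). A minimal sketch of just that name normalization, outside the archiver — the helper name is hypothetical, and 100 is tar's NAMELEN:
```java
public class PaxHeaderNameSketch {
    // Mirrors the fixed writePaxHeaders name handling: drop trailing slashes,
    // then clamp to the 100-byte tar name field.
    static String paxHeaderName(String entryName) {
        String name = "./PaxHeaders.X/" + entryName;
        while (name.endsWith("/")) {
            name = name.substring(0, name.length() - 1);
        }
        if (name.length() >= 100) {
            name = name.substring(0, 99);
        }
        return name;
    }

    public static void main(String[] args) {
        System.out.println(paxHeaderName("layers/openstreetmap/default/"));
        // ./PaxHeaders.X/layers/openstreetmap/default  (no trailing slash)
    }
}
```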
Cli
17
src/java/org/apache/commons/cli/PosixParser.java
282
310
PosixParser keeps bursting tokens even if a non-option character is found
PosixParser doesn't stop the bursting process of a token if stopAtNonOption is enabled and a non-option character is encountered. For example if the options a and b are defined, with stopAtNonOption=true the following command line: {code}-azb{code} is turned into: {code}-a zb -b{code} the correct output should be: {code}-a zb{code}
protected void burstToken(String token, boolean stopAtNonOption) { for (int i = 1; i < token.length(); i++) { String ch = String.valueOf(token.charAt(i)); if (options.hasOption(ch)) { tokens.add("-" + ch); currentOption = options.getOption(ch); if (currentOption.hasArg() && (token.length() != (i + 1))) { tokens.add(token.substring(i + 1)); break; } } else if (stopAtNonOption) { process(token.substring(i)); } else { tokens.add(token); break; } } }
protected void burstToken ( String token , boolean stopAtNonOption ) { for ( int i = 1 ; i < token . length ( ) ; i ++ ) { String ch = String . valueOf ( token . charAt ( i ) ) ; if ( options . hasOption ( ch ) ) { tokens . add ( "-" + ch ) ; currentOption = options . getOption ( ch ) ; if ( currentOption . hasArg ( ) && ( token . length ( ) != ( i + 1 ) ) ) { tokens . add ( token . substring ( i + 1 ) ) ; break ; } } else if ( stopAtNonOption ) { process ( token . substring ( i ) ) ; } else { tokens . add ( token ) ; break ; } } }
protected void burstToken(String token, boolean stopAtNonOption) { for (int i = 1; i < token.length(); i++) { String ch = String.valueOf(token.charAt(i)); if (options.hasOption(ch)) { tokens.add("-" + ch); currentOption = options.getOption(ch); if (currentOption.hasArg() && (token.length() != (i + 1))) { tokens.add(token.substring(i + 1)); break; } } else if (stopAtNonOption) { process(token.substring(i)); break; } else { tokens.add(token); break; } } }
protected void burstToken ( String token , boolean stopAtNonOption ) { for ( int i = 1 ; i < token . length ( ) ; i ++ ) { String ch = String . valueOf ( token . charAt ( i ) ) ; if ( options . hasOption ( ch ) ) { tokens . add ( "-" + ch ) ; currentOption = options . getOption ( ch ) ; if ( currentOption . hasArg ( ) && ( token . length ( ) != ( i + 1 ) ) ) { tokens . add ( token . substring ( i + 1 ) ) ; break ; } } else if ( stopAtNonOption ) { process ( token . substring ( i ) ) ; break ; } else { tokens . add ( token ) ; break ; } } }
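A hypothetical reproduction of the scenario from the report, assuming commons-cli's Parser.parse(Options, String[], boolean) overload; the printed values are what the fixed parser should produce:
```java
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;

public class BurstTokenSketch {
    public static void main(String[] args) throws Exception {
        Options options = new Options();
        options.addOption("a", false, "option a");
        options.addOption("b", false, "option b");
        // stopAtNonOption = true: bursting "-azb" must stop at the 'z'
        CommandLine cl = new PosixParser().parse(options, new String[] { "-azb" }, true);
        System.out.println(cl.hasOption("a")); // true
        System.out.println(cl.hasOption("b")); // false once bursting stops at 'z'
        System.out.println(cl.getArgList());   // [zb]
    }
}
```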
JacksonDatabind
24
src/main/java/com/fasterxml/jackson/databind/cfg/BaseSettings.java
230
238
Configuring an ObjectMapper's DateFormat changes time zone when serialising Joda DateTime
The serialisation of Joda `DateTime` instances behaves differently in 2.6.0 vs 2.5.4 when the `ObjectMapper`'s had its `DateFormat` configured. The behaviour change is illustrated by the following code: ``` java public static void main(String[] args) throws JsonProcessingException { System.out.println(createObjectMapper() .writeValueAsString(new DateTime(1988, 6, 25, 20, 30, DateTimeZone.UTC))); } private static ObjectMapper createObjectMapper() { ObjectMapper mapper = new ObjectMapper(); mapper.registerModule(createJodaModule()); mapper.configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false); System.out.println(mapper.getSerializationConfig().getTimeZone()); mapper.setDateFormat(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")); System.out.println(mapper.getSerializationConfig().getTimeZone()); return mapper; } private static SimpleModule createJodaModule() { SimpleModule module = new SimpleModule(); module.addSerializer(DateTime.class, new DateTimeSerializer( new JacksonJodaDateFormat(DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss") .withZoneUTC()))); return module; } ``` When run with Jackson 2.5.4 the output is: ``` sun.util.calendar.ZoneInfo[id="GMT",offset=0,dstSavings=0,useDaylight=false,transitions=0,lastRule=null] sun.util.calendar.ZoneInfo[id="GMT",offset=0,dstSavings=0,useDaylight=false,transitions=0,lastRule=null] "1988-06-25 20:30:00" ``` When run with Jackson 2.6.0 the output is: ``` sun.util.calendar.ZoneInfo[id="GMT",offset=0,dstSavings=0,useDaylight=false,transitions=0,lastRule=null] sun.util.calendar.ZoneInfo[id="Europe/London",offset=0,dstSavings=3600000,useDaylight=true,transitions=242,lastRule=java.util.SimpleTimeZone[id=Europe/London,offset=0,dstSavings=3600000,useDaylight=true,startYear=0,startMode=2,startMonth=2,startDay=-1,startDayOfWeek=1,startTime=3600000,startTimeMode=2,endMode=2,endMonth=9,endDay=-1,endDayOfWeek=1,endTime=3600000,endTimeMode=2]] "1988-06-25 21:30:00" ``` It looks like the fix for #824 is the cause. In 2.6, the call to `mapper.setDateFormat` causes the `ObjectMapper`'s time zone to be set to the JVM's default time zone. In 2.5.x, calling `mapper.setDateFormat` has no effect on its time zone.
public BaseSettings withDateFormat(DateFormat df) { if (_dateFormat == df) { return this; } TimeZone tz = (df == null) ? _timeZone : df.getTimeZone(); return new BaseSettings(_classIntrospector, _annotationIntrospector, _visibilityChecker, _propertyNamingStrategy, _typeFactory, _typeResolverBuilder, df, _handlerInstantiator, _locale, tz, _defaultBase64); }
public BaseSettings withDateFormat ( DateFormat df ) { if ( _dateFormat == df ) { return this ; } TimeZone tz = ( df == null ) ? _timeZone : df . getTimeZone ( ) ; return new BaseSettings ( _classIntrospector , _annotationIntrospector , _visibilityChecker , _propertyNamingStrategy , _typeFactory , _typeResolverBuilder , df , _handlerInstantiator , _locale , tz , _defaultBase64 ) ; }
public BaseSettings withDateFormat(DateFormat df) { if (_dateFormat == df) { return this; } return new BaseSettings(_classIntrospector, _annotationIntrospector, _visibilityChecker, _propertyNamingStrategy, _typeFactory, _typeResolverBuilder, df, _handlerInstantiator, _locale, _timeZone, _defaultBase64); }
public BaseSettings withDateFormat ( DateFormat df ) { if ( _dateFormat == df ) { return this ; } return new BaseSettings ( _classIntrospector , _annotationIntrospector , _visibilityChecker , _propertyNamingStrategy , _typeFactory , _typeResolverBuilder , df , _handlerInstantiator , _locale , _timeZone , _defaultBase64 ) ; }
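A hypothetical before/after probe of the regression, assuming a JVM default zone that differs from the mapper's default (GMT); with the fix, setDateFormat leaves the configured time zone alone:
```java
import java.text.SimpleDateFormat;
import java.util.TimeZone;
import com.fasterxml.jackson.databind.ObjectMapper;

public class DateFormatTimeZoneSketch {
    public static void main(String[] args) {
        ObjectMapper mapper = new ObjectMapper();
        TimeZone before = mapper.getSerializationConfig().getTimeZone();
        mapper.setDateFormat(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"));
        TimeZone after = mapper.getSerializationConfig().getTimeZone();
        // Pre-fix this printed the JVM default zone; post-fix it matches `before`.
        System.out.println(before.getID() + " -> " + after.getID());
    }
}
```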
Compress
19
src/main/java/org/apache/commons/compress/archivers/zip/Zip64ExtendedInformationExtraField.java
249
287
ZipException on reading valid zip64 file
ZipFile zip = new ZipFile(new File("ordertest-64.zip")); throws ZipException "central directory zip64 extended information extra field's length doesn't match central directory data. Expected length 16 but is 28". The archive was created with DotNetZip-WinFormsTool using the zip64 flag (which forces it to always make zip64 archives). The zip file is tested from the console: $zip -T ordertest-64.zip Output: test of ordertest-64.zip OK. I can open the archive with FileRoller without problems on my machine, and can browse and extract it.
public void reparseCentralDirectoryData(boolean hasUncompressedSize, boolean hasCompressedSize, boolean hasRelativeHeaderOffset, boolean hasDiskStart) throws ZipException { if (rawCentralDirectoryData != null) { int expectedLength = (hasUncompressedSize ? DWORD : 0) + (hasCompressedSize ? DWORD : 0) + (hasRelativeHeaderOffset ? DWORD : 0) + (hasDiskStart ? WORD : 0); if (rawCentralDirectoryData.length != expectedLength) { throw new ZipException("central directory zip64 extended" + " information extra field's length" + " doesn't match central directory" + " data. Expected length " + expectedLength + " but is " + rawCentralDirectoryData.length); } int offset = 0; if (hasUncompressedSize) { size = new ZipEightByteInteger(rawCentralDirectoryData, offset); offset += DWORD; } if (hasCompressedSize) { compressedSize = new ZipEightByteInteger(rawCentralDirectoryData, offset); offset += DWORD; } if (hasRelativeHeaderOffset) { relativeHeaderOffset = new ZipEightByteInteger(rawCentralDirectoryData, offset); offset += DWORD; } if (hasDiskStart) { diskStart = new ZipLong(rawCentralDirectoryData, offset); offset += WORD; } } }
public void reparseCentralDirectoryData ( boolean hasUncompressedSize , boolean hasCompressedSize , boolean hasRelativeHeaderOffset , boolean hasDiskStart ) throws ZipException { if ( rawCentralDirectoryData != null ) { int expectedLength = ( hasUncompressedSize ? DWORD : 0 ) + ( hasCompressedSize ? DWORD : 0 ) + ( hasRelativeHeaderOffset ? DWORD : 0 ) + ( hasDiskStart ? WORD : 0 ) ; if ( rawCentralDirectoryData . length != expectedLength ) { throw new ZipException ( "central directory zip64 extended" + " information extra field's length" + " doesn't match central directory" + " data. Expected length " + expectedLength + " but is " + rawCentralDirectoryData . length ) ; } int offset = 0 ; if ( hasUncompressedSize ) { size = new ZipEightByteInteger ( rawCentralDirectoryData , offset ) ; offset += DWORD ; } if ( hasCompressedSize ) { compressedSize = new ZipEightByteInteger ( rawCentralDirectoryData , offset ) ; offset += DWORD ; } if ( hasRelativeHeaderOffset ) { relativeHeaderOffset = new ZipEightByteInteger ( rawCentralDirectoryData , offset ) ; offset += DWORD ; } if ( hasDiskStart ) { diskStart = new ZipLong ( rawCentralDirectoryData , offset ) ; offset += WORD ; } } }
public void reparseCentralDirectoryData(boolean hasUncompressedSize, boolean hasCompressedSize, boolean hasRelativeHeaderOffset, boolean hasDiskStart) throws ZipException { if (rawCentralDirectoryData != null) { int expectedLength = (hasUncompressedSize ? DWORD : 0) + (hasCompressedSize ? DWORD : 0) + (hasRelativeHeaderOffset ? DWORD : 0) + (hasDiskStart ? WORD : 0); if (rawCentralDirectoryData.length < expectedLength) { throw new ZipException("central directory zip64 extended" + " information extra field's length" + " doesn't match central directory" + " data. Expected length " + expectedLength + " but is " + rawCentralDirectoryData.length); } int offset = 0; if (hasUncompressedSize) { size = new ZipEightByteInteger(rawCentralDirectoryData, offset); offset += DWORD; } if (hasCompressedSize) { compressedSize = new ZipEightByteInteger(rawCentralDirectoryData, offset); offset += DWORD; } if (hasRelativeHeaderOffset) { relativeHeaderOffset = new ZipEightByteInteger(rawCentralDirectoryData, offset); offset += DWORD; } if (hasDiskStart) { diskStart = new ZipLong(rawCentralDirectoryData, offset); offset += WORD; } } }
public void reparseCentralDirectoryData ( boolean hasUncompressedSize , boolean hasCompressedSize , boolean hasRelativeHeaderOffset , boolean hasDiskStart ) throws ZipException { if ( rawCentralDirectoryData != null ) { int expectedLength = ( hasUncompressedSize ? DWORD : 0 ) + ( hasCompressedSize ? DWORD : 0 ) + ( hasRelativeHeaderOffset ? DWORD : 0 ) + ( hasDiskStart ? WORD : 0 ) ; if ( rawCentralDirectoryData . length < expectedLength ) { throw new ZipException ( "central directory zip64 extended" + " information extra field's length" + " doesn't match central directory" + " data. Expected length " + expectedLength + " but is " + rawCentralDirectoryData . length ) ; } int offset = 0 ; if ( hasUncompressedSize ) { size = new ZipEightByteInteger ( rawCentralDirectoryData , offset ) ; offset += DWORD ; } if ( hasCompressedSize ) { compressedSize = new ZipEightByteInteger ( rawCentralDirectoryData , offset ) ; offset += DWORD ; } if ( hasRelativeHeaderOffset ) { relativeHeaderOffset = new ZipEightByteInteger ( rawCentralDirectoryData , offset ) ; offset += DWORD ; } if ( hasDiskStart ) { diskStart = new ZipLong ( rawCentralDirectoryData , offset ) ; offset += WORD ; } } }
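The one-character fix (!= becomes <) turns a strict equality into a minimum-length check, so archivers that store extra data after the mandated fields are tolerated. A trivial sketch using the lengths from the report:
```java
public class Zip64LengthCheckSketch {
    public static void main(String[] args) {
        int expectedLength = 16; // e.g. compressed + uncompressed size, 8 bytes each
        int rawLength = 28;      // DotNetZip stored additional trailing data
        System.out.println(rawLength != expectedLength); // true  -> pre-fix: ZipException
        System.out.println(rawLength <  expectedLength); // false -> post-fix: accepted
    }
}
```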
Cli
40
src/main/java/org/apache/commons/cli/TypeHandler.java
62
105
TypeHandler should throw ParseException for an unsupported class
JavaDoc for TypeHandler states that createValue will {noformat} * @throws ParseException if the value creation for the given object type failed{noformat} However createValue(String str, Class<?> clazz) will return null if the clazz is unknown.
@SuppressWarnings("unchecked") // returned value will have type T because it is fixed by clazz public static <T> T createValue(final String str, final Class<T> clazz) throws ParseException { if (PatternOptionBuilder.STRING_VALUE == clazz) { return (T) str; } else if (PatternOptionBuilder.OBJECT_VALUE == clazz) { return (T) createObject(str); } else if (PatternOptionBuilder.NUMBER_VALUE == clazz) { return (T) createNumber(str); } else if (PatternOptionBuilder.DATE_VALUE == clazz) { return (T) createDate(str); } else if (PatternOptionBuilder.CLASS_VALUE == clazz) { return (T) createClass(str); } else if (PatternOptionBuilder.FILE_VALUE == clazz) { return (T) createFile(str); } else if (PatternOptionBuilder.EXISTING_FILE_VALUE == clazz) { return (T) openFile(str); } else if (PatternOptionBuilder.FILES_VALUE == clazz) { return (T) createFiles(str); } else if (PatternOptionBuilder.URL_VALUE == clazz) { return (T) createURL(str); } else { return null; } }
@ SuppressWarnings ( "unchecked" ) public static < T > T createValue ( final String str , final Class < T > clazz ) throws ParseException { if ( PatternOptionBuilder . STRING_VALUE == clazz ) { return ( T ) str ; } else if ( PatternOptionBuilder . OBJECT_VALUE == clazz ) { return ( T ) createObject ( str ) ; } else if ( PatternOptionBuilder . NUMBER_VALUE == clazz ) { return ( T ) createNumber ( str ) ; } else if ( PatternOptionBuilder . DATE_VALUE == clazz ) { return ( T ) createDate ( str ) ; } else if ( PatternOptionBuilder . CLASS_VALUE == clazz ) { return ( T ) createClass ( str ) ; } else if ( PatternOptionBuilder . FILE_VALUE == clazz ) { return ( T ) createFile ( str ) ; } else if ( PatternOptionBuilder . EXISTING_FILE_VALUE == clazz ) { return ( T ) openFile ( str ) ; } else if ( PatternOptionBuilder . FILES_VALUE == clazz ) { return ( T ) createFiles ( str ) ; } else if ( PatternOptionBuilder . URL_VALUE == clazz ) { return ( T ) createURL ( str ) ; } else { return null ; } }
@SuppressWarnings("unchecked") // returned value will have type T because it is fixed by clazz public static <T> T createValue(final String str, final Class<T> clazz) throws ParseException { if (PatternOptionBuilder.STRING_VALUE == clazz) { return (T) str; } else if (PatternOptionBuilder.OBJECT_VALUE == clazz) { return (T) createObject(str); } else if (PatternOptionBuilder.NUMBER_VALUE == clazz) { return (T) createNumber(str); } else if (PatternOptionBuilder.DATE_VALUE == clazz) { return (T) createDate(str); } else if (PatternOptionBuilder.CLASS_VALUE == clazz) { return (T) createClass(str); } else if (PatternOptionBuilder.FILE_VALUE == clazz) { return (T) createFile(str); } else if (PatternOptionBuilder.EXISTING_FILE_VALUE == clazz) { return (T) openFile(str); } else if (PatternOptionBuilder.FILES_VALUE == clazz) { return (T) createFiles(str); } else if (PatternOptionBuilder.URL_VALUE == clazz) { return (T) createURL(str); } else { throw new ParseException("Unable to handle the class: " + clazz); } }
@ SuppressWarnings ( "unchecked" ) public static < T > T createValue ( final String str , final Class < T > clazz ) throws ParseException { if ( PatternOptionBuilder . STRING_VALUE == clazz ) { return ( T ) str ; } else if ( PatternOptionBuilder . OBJECT_VALUE == clazz ) { return ( T ) createObject ( str ) ; } else if ( PatternOptionBuilder . NUMBER_VALUE == clazz ) { return ( T ) createNumber ( str ) ; } else if ( PatternOptionBuilder . DATE_VALUE == clazz ) { return ( T ) createDate ( str ) ; } else if ( PatternOptionBuilder . CLASS_VALUE == clazz ) { return ( T ) createClass ( str ) ; } else if ( PatternOptionBuilder . FILE_VALUE == clazz ) { return ( T ) createFile ( str ) ; } else if ( PatternOptionBuilder . EXISTING_FILE_VALUE == clazz ) { return ( T ) openFile ( str ) ; } else if ( PatternOptionBuilder . FILES_VALUE == clazz ) { return ( T ) createFiles ( str ) ; } else if ( PatternOptionBuilder . URL_VALUE == clazz ) { return ( T ) createURL ( str ) ; } else { throw new ParseException ( "Unable to handle the class: " + clazz ) ; } }
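A hypothetical call exercising the new failure mode — Integer is not one of the PatternOptionBuilder value classes, so the fixed createValue raises instead of silently returning null:
```java
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.TypeHandler;

public class UnsupportedClassSketch {
    public static void main(String[] args) {
        try {
            TypeHandler.createValue("42", Integer.class);
            System.out.println("null returned (pre-fix behaviour)");
        } catch (ParseException e) {
            System.out.println("post-fix: " + e.getMessage());
        }
    }
}
```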
Math
41
src/main/java/org/apache/commons/math/stat/descriptive/moment/Variance.java
501
532
One of the Variance.evaluate() methods does not work correctly
The method org.apache.commons.math.stat.descriptive.moment.Variance.evaluate(double[] values, double[] weights, double mean, int begin, int length) does not work properly. It looks like it ignores the length parameter and grabs the whole dataset. The similar method in the Mean class seems to work. I did not check other methods taking part of the array; they may have the same problem. Workaround: I had to shrink my arrays and use the method without the length.
public double evaluate(final double[] values, final double[] weights, final double mean, final int begin, final int length) { double var = Double.NaN; if (test(values, weights, begin, length)) { if (length == 1) { var = 0.0; } else if (length > 1) { double accum = 0.0; double dev = 0.0; double accum2 = 0.0; for (int i = begin; i < begin + length; i++) { dev = values[i] - mean; accum += weights[i] * (dev * dev); accum2 += weights[i] * dev; } double sumWts = 0; for (int i = 0; i < weights.length; i++) { sumWts += weights[i]; } if (isBiasCorrected) { var = (accum - (accum2 * accum2 / sumWts)) / (sumWts - 1.0); } else { var = (accum - (accum2 * accum2 / sumWts)) / sumWts; } } } return var; }
public double evaluate ( final double [ ] values , final double [ ] weights , final double mean , final int begin , final int length ) { double var = Double . NaN ; if ( test ( values , weights , begin , length ) ) { if ( length == 1 ) { var = 0.0 ; } else if ( length > 1 ) { double accum = 0.0 ; double dev = 0.0 ; double accum2 = 0.0 ; for ( int i = begin ; i < begin + length ; i ++ ) { dev = values [ i ] - mean ; accum += weights [ i ] * ( dev * dev ) ; accum2 += weights [ i ] * dev ; } double sumWts = 0 ; for ( int i = 0 ; i < weights . length ; i ++ ) { sumWts += weights [ i ] ; } if ( isBiasCorrected ) { var = ( accum - ( accum2 * accum2 / sumWts ) ) / ( sumWts - 1.0 ) ; } else { var = ( accum - ( accum2 * accum2 / sumWts ) ) / sumWts ; } } } return var ; }
public double evaluate(final double[] values, final double[] weights, final double mean, final int begin, final int length) { double var = Double.NaN; if (test(values, weights, begin, length)) { if (length == 1) { var = 0.0; } else if (length > 1) { double accum = 0.0; double dev = 0.0; double accum2 = 0.0; for (int i = begin; i < begin + length; i++) { dev = values[i] - mean; accum += weights[i] * (dev * dev); accum2 += weights[i] * dev; } double sumWts = 0; for (int i = begin; i < begin + length; i++) { sumWts += weights[i]; } if (isBiasCorrected) { var = (accum - (accum2 * accum2 / sumWts)) / (sumWts - 1.0); } else { var = (accum - (accum2 * accum2 / sumWts)) / sumWts; } } } return var; }
public double evaluate ( final double [ ] values , final double [ ] weights , final double mean , final int begin , final int length ) { double var = Double . NaN ; if ( test ( values , weights , begin , length ) ) { if ( length == 1 ) { var = 0.0 ; } else if ( length > 1 ) { double accum = 0.0 ; double dev = 0.0 ; double accum2 = 0.0 ; for ( int i = begin ; i < begin + length ; i ++ ) { dev = values [ i ] - mean ; accum += weights [ i ] * ( dev * dev ) ; accum2 += weights [ i ] * dev ; } double sumWts = 0 ; for ( int i = begin ; i < begin + length ; i ++ ) { sumWts += weights [ i ] ; } if ( isBiasCorrected ) { var = ( accum - ( accum2 * accum2 / sumWts ) ) / ( sumWts - 1.0 ) ; } else { var = ( accum - ( accum2 * accum2 / sumWts ) ) / sumWts ; } } } return var ; }
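The defect is isolated to the weight normalization: the buggy loop sums every weight in the array while the deviations only cover [begin, begin + length). A minimal sketch of how the two sums diverge (values are arbitrary):
```java
public class WindowedWeightSumSketch {
    public static void main(String[] args) {
        double[] weights = { 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 };
        int begin = 1, length = 3;

        double sumAll = 0;    // pre-fix: iterates 0 .. weights.length
        for (int i = 0; i < weights.length; i++) {
            sumAll += weights[i];
        }
        double sumWindow = 0; // post-fix: iterates begin .. begin + length
        for (int i = begin; i < begin + length; i++) {
            sumWindow += weights[i];
        }
        System.out.println(sumAll);    // 6.0 -> denominator too large, variance too small
        System.out.println(sumWindow); // 3.0 -> consistent with the requested window
    }
}
```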
Math
57
src/main/java/org/apache/commons/math/stat/clustering/KMeansPlusPlusClusterer.java
161
198
Truncation issue in KMeansPlusPlusClusterer
The for loop inside KMeansPlusPlusClusterer.chooseInitialClusters defines a variable int sum = 0; This variable should have type double, rather than int. Using an int causes the method to truncate the distances between points to (square roots of) integers. It's especially bad when the distances between points are typically less than 1. As an aside, in version 2.2, this bug manifested itself by making the clusterer return empty clusters. I wonder if the EmptyClusterStrategy would still be necessary if this bug were fixed.
private static <T extends Clusterable<T>> List<Cluster<T>> chooseInitialCenters(final Collection<T> points, final int k, final Random random) { final List<T> pointSet = new ArrayList<T>(points); final List<Cluster<T>> resultSet = new ArrayList<Cluster<T>>(); // Choose one center uniformly at random from among the data points. final T firstPoint = pointSet.remove(random.nextInt(pointSet.size())); resultSet.add(new Cluster<T>(firstPoint)); final double[] dx2 = new double[pointSet.size()]; while (resultSet.size() < k) { // For each data point x, compute D(x), the distance between x and // the nearest center that has already been chosen. int sum = 0; for (int i = 0; i < pointSet.size(); i++) { final T p = pointSet.get(i); final Cluster<T> nearest = getNearestCluster(resultSet, p); final double d = p.distanceFrom(nearest.getCenter()); sum += d * d; dx2[i] = sum; } // Add one new data point as a center. Each point x is chosen with // probability proportional to D(x)2 final double r = random.nextDouble() * sum; for (int i = 0 ; i < dx2.length; i++) { if (dx2[i] >= r) { final T p = pointSet.remove(i); resultSet.add(new Cluster<T>(p)); break; } } } return resultSet; }
private static < T extends Clusterable < T > > List < Cluster < T > > chooseInitialCenters ( final Collection < T > points , final int k , final Random random ) { final List < T > pointSet = new ArrayList < T > ( points ) ; final List < Cluster < T > > resultSet = new ArrayList < Cluster < T > > ( ) ; final T firstPoint = pointSet . remove ( random . nextInt ( pointSet . size ( ) ) ) ; resultSet . add ( new Cluster < T > ( firstPoint ) ) ; final double [ ] dx2 = new double [ pointSet . size ( ) ] ; while ( resultSet . size ( ) < k ) { int sum = 0 ; for ( int i = 0 ; i < pointSet . size ( ) ; i ++ ) { final T p = pointSet . get ( i ) ; final Cluster < T > nearest = getNearestCluster ( resultSet , p ) ; final double d = p . distanceFrom ( nearest . getCenter ( ) ) ; sum += d * d ; dx2 [ i ] = sum ; } final double r = random . nextDouble ( ) * sum ; for ( int i = 0 ; i < dx2 . length ; i ++ ) { if ( dx2 [ i ] >= r ) { final T p = pointSet . remove ( i ) ; resultSet . add ( new Cluster < T > ( p ) ) ; break ; } } } return resultSet ; }
private static <T extends Clusterable<T>> List<Cluster<T>> chooseInitialCenters(final Collection<T> points, final int k, final Random random) { final List<T> pointSet = new ArrayList<T>(points); final List<Cluster<T>> resultSet = new ArrayList<Cluster<T>>(); // Choose one center uniformly at random from among the data points. final T firstPoint = pointSet.remove(random.nextInt(pointSet.size())); resultSet.add(new Cluster<T>(firstPoint)); final double[] dx2 = new double[pointSet.size()]; while (resultSet.size() < k) { // For each data point x, compute D(x), the distance between x and // the nearest center that has already been chosen. double sum = 0; for (int i = 0; i < pointSet.size(); i++) { final T p = pointSet.get(i); final Cluster<T> nearest = getNearestCluster(resultSet, p); final double d = p.distanceFrom(nearest.getCenter()); sum += d * d; dx2[i] = sum; } // Add one new data point as a center. Each point x is chosen with // probability proportional to D(x)2 final double r = random.nextDouble() * sum; for (int i = 0 ; i < dx2.length; i++) { if (dx2[i] >= r) { final T p = pointSet.remove(i); resultSet.add(new Cluster<T>(p)); break; } } } return resultSet; }
private static < T extends Clusterable < T > > List < Cluster < T > > chooseInitialCenters ( final Collection < T > points , final int k , final Random random ) { final List < T > pointSet = new ArrayList < T > ( points ) ; final List < Cluster < T > > resultSet = new ArrayList < Cluster < T > > ( ) ; final T firstPoint = pointSet . remove ( random . nextInt ( pointSet . size ( ) ) ) ; resultSet . add ( new Cluster < T > ( firstPoint ) ) ; final double [ ] dx2 = new double [ pointSet . size ( ) ] ; while ( resultSet . size ( ) < k ) { double sum = 0 ; for ( int i = 0 ; i < pointSet . size ( ) ; i ++ ) { final T p = pointSet . get ( i ) ; final Cluster < T > nearest = getNearestCluster ( resultSet , p ) ; final double d = p . distanceFrom ( nearest . getCenter ( ) ) ; sum += d * d ; dx2 [ i ] = sum ; } final double r = random . nextDouble ( ) * sum ; for ( int i = 0 ; i < dx2 . length ; i ++ ) { if ( dx2 [ i ] >= r ) { final T p = pointSet . remove ( i ) ; resultSet . add ( new Cluster < T > ( p ) ) ; break ; } } } return resultSet ; }
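A self-contained sketch of the truncation itself: Java's compound assignment silently narrows the double back to int, so squared distances below 1.0 vanish from the running sum (the distances here are made up):
```java
public class IntTruncationSketch {
    public static void main(String[] args) {
        double[] squaredDistances = { 0.25, 0.49, 0.81 };
        int intSum = 0;       // pre-fix accumulator type
        double doubleSum = 0; // post-fix accumulator type
        for (double d2 : squaredDistances) {
            intSum += d2;     // compiles: compound assignment hides an (int) cast
            doubleSum += d2;
        }
        System.out.println(intSum);    // 0    -> r = random.nextDouble() * 0 is always 0
        System.out.println(doubleSum); // 1.55 -> proper sampling weights
    }
}
```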
Math
101
src/java/org/apache/commons/math/complex/ComplexFormat.java
320
389
java.lang.StringIndexOutOfBoundsException in ComplexFormat.parse(String source, ParsePosition pos)
The parse(String source, ParsePosition pos) method in the ComplexFormat class does not check whether the imaginary character is present, which produces a StringIndexOutOfBoundsException in the substring method: (line 375 of ComplexFormat) ... // parse imaginary character int n = getImaginaryCharacter().length(); startIndex = pos.getIndex(); int endIndex = startIndex + n; if (source.substring(startIndex, endIndex).compareTo( getImaginaryCharacter()) != 0) { ... I encountered this exception typing in a JTextField with ComplexFormat set to look up an AbstractFormatter. If the user types only the imaginary part of the complex number first, they get this exception. Solution: Before setting n to the length of the imaginary character, check whether the source contains it. My proposal: ... int n = 0; if (source.contains(getImaginaryCharacter())) n = getImaginaryCharacter().length(); ... F.S.
public Complex parse(String source, ParsePosition pos) { int initialIndex = pos.getIndex(); // parse whitespace parseAndIgnoreWhitespace(source, pos); // parse real Number re = parseNumber(source, getRealFormat(), pos); if (re == null) { // invalid real number // set index back to initial, error index should already be set // character examined. pos.setIndex(initialIndex); return null; } // parse sign int startIndex = pos.getIndex(); char c = parseNextCharacter(source, pos); int sign = 0; switch (c) { case 0 : // no sign // return real only complex number return new Complex(re.doubleValue(), 0.0); case '-' : sign = -1; break; case '+' : sign = 1; break; default : // invalid sign // set index back to initial, error index should be the last // character examined. pos.setIndex(initialIndex); pos.setErrorIndex(startIndex); return null; } // parse whitespace parseAndIgnoreWhitespace(source, pos); // parse imaginary Number im = parseNumber(source, getRealFormat(), pos); if (im == null) { // invalid imaginary number // set index back to initial, error index should already be set // character examined. pos.setIndex(initialIndex); return null; } // parse imaginary character int n = getImaginaryCharacter().length(); startIndex = pos.getIndex(); int endIndex = startIndex + n; if ( source.substring(startIndex, endIndex).compareTo( getImaginaryCharacter()) != 0) { // set index back to initial, error index should be the start index // character examined. pos.setIndex(initialIndex); pos.setErrorIndex(startIndex); return null; } pos.setIndex(endIndex); return new Complex(re.doubleValue(), im.doubleValue() * sign); }
public Complex parse ( String source , ParsePosition pos ) { int initialIndex = pos . getIndex ( ) ; parseAndIgnoreWhitespace ( source , pos ) ; Number re = parseNumber ( source , getRealFormat ( ) , pos ) ; if ( re == null ) { pos . setIndex ( initialIndex ) ; return null ; } int startIndex = pos . getIndex ( ) ; char c = parseNextCharacter ( source , pos ) ; int sign = 0 ; switch ( c ) { case 0 : return new Complex ( re . doubleValue ( ) , 0.0 ) ; case '-' : sign = - 1 ; break ; case '+' : sign = 1 ; break ; default : pos . setIndex ( initialIndex ) ; pos . setErrorIndex ( startIndex ) ; return null ; } parseAndIgnoreWhitespace ( source , pos ) ; Number im = parseNumber ( source , getRealFormat ( ) , pos ) ; if ( im == null ) { pos . setIndex ( initialIndex ) ; return null ; } int n = getImaginaryCharacter ( ) . length ( ) ; startIndex = pos . getIndex ( ) ; int endIndex = startIndex + n ; if ( source . substring ( startIndex , endIndex ) . compareTo ( getImaginaryCharacter ( ) ) != 0 ) { pos . setIndex ( initialIndex ) ; pos . setErrorIndex ( startIndex ) ; return null ; } pos . setIndex ( endIndex ) ; return new Complex ( re . doubleValue ( ) , im . doubleValue ( ) * sign ) ; }
public Complex parse(String source, ParsePosition pos) { int initialIndex = pos.getIndex(); // parse whitespace parseAndIgnoreWhitespace(source, pos); // parse real Number re = parseNumber(source, getRealFormat(), pos); if (re == null) { // invalid real number // set index back to initial, error index should already be set // character examined. pos.setIndex(initialIndex); return null; } // parse sign int startIndex = pos.getIndex(); char c = parseNextCharacter(source, pos); int sign = 0; switch (c) { case 0 : // no sign // return real only complex number return new Complex(re.doubleValue(), 0.0); case '-' : sign = -1; break; case '+' : sign = 1; break; default : // invalid sign // set index back to initial, error index should be the last // character examined. pos.setIndex(initialIndex); pos.setErrorIndex(startIndex); return null; } // parse whitespace parseAndIgnoreWhitespace(source, pos); // parse imaginary Number im = parseNumber(source, getRealFormat(), pos); if (im == null) { // invalid imaginary number // set index back to initial, error index should already be set // character examined. pos.setIndex(initialIndex); return null; } // parse imaginary character int n = getImaginaryCharacter().length(); startIndex = pos.getIndex(); int endIndex = startIndex + n; if ((startIndex >= source.length()) || (endIndex > source.length()) || source.substring(startIndex, endIndex).compareTo( getImaginaryCharacter()) != 0) { // set index back to initial, error index should be the start index // character examined. pos.setIndex(initialIndex); pos.setErrorIndex(startIndex); return null; } pos.setIndex(endIndex); return new Complex(re.doubleValue(), im.doubleValue() * sign); }
public Complex parse ( String source , ParsePosition pos ) { int initialIndex = pos . getIndex ( ) ; parseAndIgnoreWhitespace ( source , pos ) ; Number re = parseNumber ( source , getRealFormat ( ) , pos ) ; if ( re == null ) { pos . setIndex ( initialIndex ) ; return null ; } int startIndex = pos . getIndex ( ) ; char c = parseNextCharacter ( source , pos ) ; int sign = 0 ; switch ( c ) { case 0 : return new Complex ( re . doubleValue ( ) , 0.0 ) ; case '-' : sign = - 1 ; break ; case '+' : sign = 1 ; break ; default : pos . setIndex ( initialIndex ) ; pos . setErrorIndex ( startIndex ) ; return null ; } parseAndIgnoreWhitespace ( source , pos ) ; Number im = parseNumber ( source , getRealFormat ( ) , pos ) ; if ( im == null ) { pos . setIndex ( initialIndex ) ; return null ; } int n = getImaginaryCharacter ( ) . length ( ) ; startIndex = pos . getIndex ( ) ; int endIndex = startIndex + n ; if ( ( startIndex >= source . length ( ) ) || ( endIndex > source . length ( ) ) || source . substring ( startIndex , endIndex ) . compareTo ( getImaginaryCharacter ( ) ) != 0 ) { pos . setIndex ( initialIndex ) ; pos . setErrorIndex ( startIndex ) ; return null ; } pos . setIndex ( endIndex ) ; return new Complex ( re . doubleValue ( ) , im . doubleValue ( ) * sign ) ; }
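A minimal sketch of the added guard in isolation — the input mimics a user who has typed the imaginary number but not yet the trailing imaginary character, so the old substring call would index past the end:
```java
public class ParseBoundsSketch {
    public static void main(String[] args) {
        String source = "1 + 1";            // no trailing "i" yet
        int startIndex = source.length();   // parse position after the imaginary number
        int endIndex = startIndex + 1;      // length of the imaginary character "i"
        // Post-fix guard: the bounds checks short-circuit before substring can throw.
        if (startIndex >= source.length() || endIndex > source.length()
                || source.substring(startIndex, endIndex).compareTo("i") != 0) {
            System.out.println("invalid -> parse() resets the position and returns null");
        }
        // Pre-fix, source.substring(startIndex, endIndex) threw
        // StringIndexOutOfBoundsException here instead.
    }
}
```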
Mockito
33
src/org/mockito/internal/invocation/InvocationMatcher.java
92
100
polymorphic call not dispatched correctly
Edge case around generics + inheritance. The code below did not work correctly because the polymorphic call was not dispatched to the correct method. protected interface MyIterable<T> extends Iterable<T> { public MyIterator<T> iterator(); } protected interface MyIterator<T> extends Iterator<T> { // adds nothing here } MyIterator<String> myIterator = Mockito.mock(MyIterator.class); MyIterable<String> iterable = Mockito.mock(MyIterable.class); @Test public void shouldStubbingWork() { Mockito.when(iterable.iterator()).thenReturn(myIterator); Assert.assertNotNull(((Iterable) iterable).iterator()); Assert.assertNotNull(iterable.iterator()); } @Test public void shouldVerificationWorks() { iterable.iterator(); verify(iterable).iterator(); verify((Iterable) iterable).iterator(); }
public boolean hasSameMethod(Invocation candidate) { //not using method.equals() for 1 good reason: //sometimes java generates forwarding methods when generics are in play see JavaGenericsForwardingMethodsTest Method m1 = invocation.getMethod(); Method m2 = candidate.getMethod(); /* Avoid unnecessary cloning */ return m1.equals(m2); }
public boolean hasSameMethod ( Invocation candidate ) { Method m1 = invocation . getMethod ( ) ; Method m2 = candidate . getMethod ( ) ; return m1 . equals ( m2 ) ; }
public boolean hasSameMethod(Invocation candidate) { //not using method.equals() for 1 good reason: //sometimes java generates forwarding methods when generics are in play see JavaGenericsForwardingMethodsTest Method m1 = invocation.getMethod(); Method m2 = candidate.getMethod(); if (m1.getName() != null && m1.getName().equals(m2.getName())) { /* Avoid unnecessary cloning */ Class[] params1 = m1.getParameterTypes(); Class[] params2 = m2.getParameterTypes(); if (params1.length == params2.length) { for (int i = 0; i < params1.length; i++) { if (params1[i] != params2[i]) return false; } return true; } } return false; }
public boolean hasSameMethod ( Invocation candidate ) { Method m1 = invocation . getMethod ( ) ; Method m2 = candidate . getMethod ( ) ; if ( m1 . getName ( ) != null && m1 . getName ( ) . equals ( m2 . getName ( ) ) ) { Class [ ] params1 = m1 . getParameterTypes ( ) ; Class [ ] params2 = m2 . getParameterTypes ( ) ; if ( params1 . length == params2 . length ) { for ( int i = 0 ; i < params1 . length ; i ++ ) { if ( params1 [ i ] != params2 [ i ] ) return false ; } return true ; } } return false ; }
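The underlying JVM behaviour can be shown without Mockito: the covariant override gives the sub-interface its own Method object, so Method.equals fails even though name and parameter list match — exactly the comparison the fixed hasSameMethod performs. A small sketch (the interfaces mirror the ones in the report):
```java
import java.lang.reflect.Method;
import java.util.Iterator;

public class SameMethodSketch {
    interface MyIterator<T> extends Iterator<T> { }
    interface MyIterable<T> extends Iterable<T> {
        @Override
        MyIterator<T> iterator(); // covariant return type
    }

    public static void main(String[] args) throws Exception {
        Method declared  = MyIterable.class.getMethod("iterator");
        Method inherited = Iterable.class.getMethod("iterator");
        System.out.println(declared.equals(inherited)); // false: declaring classes differ
        System.out.println(declared.getName().equals(inherited.getName())
                && declared.getParameterTypes().length
                   == inherited.getParameterTypes().length); // true: same signature
    }
}
```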
JacksonDatabind
49
src/main/java/com/fasterxml/jackson/databind/ser/impl/WritableObjectId.java
46
52
JsonIdentityInfo incorrectly serializing forward references
I wrote this small test program to demonstrate the issue: ``` java import com.fasterxml.jackson.annotation.JsonIdentityInfo; import com.fasterxml.jackson.annotation.JsonIdentityReference; import com.fasterxml.jackson.annotation.ObjectIdGenerators; import com.fasterxml.jackson.databind.ObjectMapper; public class ObjectIdTest { public static class Foo { @JsonIdentityReference(alwaysAsId = true) public Bar bar1; @JsonIdentityReference() public Bar bar2; } @JsonIdentityInfo(generator = ObjectIdGenerators.IntSequenceGenerator.class) public static class Bar { } public static void main(String[] args) throws Exception { ObjectMapper mapper = new ObjectMapper(); // create structure to serialize Foo mo = new Foo(); mo.bar1 = new Bar(); mo.bar2 = mo.bar1; // serialize it System.out.println(mapper.writeValueAsString(mo)); } } ``` When executing this test program in the latest version (2.7.4), the output will be `{"bar1":1,"bar2":{"@id":2}}` - the second field will be written with a new id even though both fields reference the same object. Because of this, writing forward references is essentially impossible. The issue seems to be the fact that BeanSerializerBase will always call WritableObjectId.generateId if the referenced object has not been written in plain format yet (https://github.com/FasterXML/jackson-databind/blob/master/src/main/java/com/fasterxml/jackson/databind/ser/std/BeanSerializerBase.java#L600). This will also happen if an id has been generated before. It might also be smarter to only generate a new id in WritableObjectId.generateId if that hasn't happened before; as that method doesn't have a javadoc I can't tell how it is supposed to work.
public Object generateId(Object forPojo) { // 04-Jun-2016, tatu: As per [databind#1255], need to consider possibility of // id being generated for "alwaysAsId", but not being written as POJO; regardless, // need to use existing id if there is one: id = generator.generateId(forPojo); return id; }
public Object generateId ( Object forPojo ) { id = generator . generateId ( forPojo ) ; return id ; }
public Object generateId(Object forPojo) { // 04-Jun-2016, tatu: As per [databind#1255], need to consider possibility of // id being generated for "alwaysAsId", but not being written as POJO; regardless, // need to use existing id if there is one: if (id == null) { id = generator.generateId(forPojo); } return id; }
public Object generateId ( Object forPojo ) { if ( id == null ) { id = generator . generateId ( forPojo ) ; } return id ; }
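The fix makes id generation idempotent per WritableObjectId. A stripped-down model of that behaviour, detached from Jackson (class and field names here are illustrative only):
```java
public class IdempotentIdSketch {
    private Object id;    // models WritableObjectId.id
    private int next = 1; // models an IntSequenceGenerator

    Object generateId(Object forPojo) {
        if (id == null) { // post-fix: only generate on the first request
            id = next++;
        }
        return id;        // pre-fix: every call minted a fresh id
    }

    public static void main(String[] args) {
        IdempotentIdSketch oid = new IdempotentIdSketch();
        Object pojo = new Object();
        System.out.println(oid.generateId(pojo)); // 1
        System.out.println(oid.generateId(pojo)); // 1 (pre-fix would print 2)
    }
}
```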
Math
94
src/java/org/apache/commons/math/util/MathUtils.java
411
460
MathUtils.gcd(u, v) fails when u and v both contain a high power of 2
The test at the beginning of MathUtils.gcd(u, v) for arguments equal to zero fails when u and v contain high enough powers of 2 so that their product overflows to zero. assertEquals(3 * (1<<15), MathUtils.gcd(3 * (1<<20), 9 * (1<<15))); Fix: Replace the test at the start of MathUtils.gcd() if (u * v == 0) { by if (u == 0 || v == 0) {
public static int gcd(int u, int v) { if (u * v == 0) { return (Math.abs(u) + Math.abs(v)); } // keep u and v negative, as negative integers range down to // -2^31, while positive numbers can only be as large as 2^31-1 // (i.e. we can't necessarily negate a negative number without // overflow) /* assert u!=0 && v!=0; */ if (u > 0) { u = -u; } // make u negative if (v > 0) { v = -v; } // make v negative // B1. [Find power of 2] int k = 0; while ((u & 1) == 0 && (v & 1) == 0 && k < 31) { // while u and v are // both even... u /= 2; v /= 2; k++; // cast out twos. } if (k == 31) { throw new ArithmeticException("overflow: gcd is 2^31"); } // B2. Initialize: u and v have been divided by 2^k and at least // one is odd. int t = ((u & 1) == 1) ? v : -(u / 2)/* B3 */; // t negative: u was odd, v may be even (t replaces v) // t positive: u was even, v is odd (t replaces u) do { /* assert u<0 && v<0; */ // B4/B3: cast out twos from t. while ((t & 1) == 0) { // while t is even.. t /= 2; // cast out twos } // B5 [reset max(u,v)] if (t > 0) { u = -t; } else { v = t; } // B6/B3. at this point both u and v should be odd. t = (v - u) / 2; // |u| larger: t positive (replace u) // |v| larger: t negative (replace v) } while (t != 0); return -u * (1 << k); // gcd is u*2^k }
public static int gcd ( int u , int v ) { if ( u * v == 0 ) { return ( Math . abs ( u ) + Math . abs ( v ) ) ; } if ( u > 0 ) { u = - u ; } if ( v > 0 ) { v = - v ; } int k = 0 ; while ( ( u & 1 ) == 0 && ( v & 1 ) == 0 && k < 31 ) { u /= 2 ; v /= 2 ; k ++ ; } if ( k == 31 ) { throw new ArithmeticException ( "overflow: gcd is 2^31" ) ; } int t = ( ( u & 1 ) == 1 ) ? v : - ( u / 2 ) ; do { while ( ( t & 1 ) == 0 ) { t /= 2 ; } if ( t > 0 ) { u = - t ; } else { v = t ; } t = ( v - u ) / 2 ; } while ( t != 0 ) ; return - u * ( 1 << k ) ; }
public static int gcd(int u, int v) { if ((u == 0) || (v == 0)) { return (Math.abs(u) + Math.abs(v)); } // keep u and v negative, as negative integers range down to // -2^31, while positive numbers can only be as large as 2^31-1 // (i.e. we can't necessarily negate a negative number without // overflow) /* assert u!=0 && v!=0; */ if (u > 0) { u = -u; } // make u negative if (v > 0) { v = -v; } // make v negative // B1. [Find power of 2] int k = 0; while ((u & 1) == 0 && (v & 1) == 0 && k < 31) { // while u and v are // both even... u /= 2; v /= 2; k++; // cast out twos. } if (k == 31) { throw new ArithmeticException("overflow: gcd is 2^31"); } // B2. Initialize: u and v have been divided by 2^k and at least // one is odd. int t = ((u & 1) == 1) ? v : -(u / 2)/* B3 */; // t negative: u was odd, v may be even (t replaces v) // t positive: u was even, v is odd (t replaces u) do { /* assert u<0 && v<0; */ // B4/B3: cast out twos from t. while ((t & 1) == 0) { // while t is even.. t /= 2; // cast out twos } // B5 [reset max(u,v)] if (t > 0) { u = -t; } else { v = t; } // B6/B3. at this point both u and v should be odd. t = (v - u) / 2; // |u| larger: t positive (replace u) // |v| larger: t negative (replace v) } while (t != 0); return -u * (1 << k); // gcd is u*2^k }
public static int gcd ( int u , int v ) { if ( ( u == 0 ) || ( v == 0 ) ) { return ( Math . abs ( u ) + Math . abs ( v ) ) ; } if ( u > 0 ) { u = - u ; } if ( v > 0 ) { v = - v ; } int k = 0 ; while ( ( u & 1 ) == 0 && ( v & 1 ) == 0 && k < 31 ) { u /= 2 ; v /= 2 ; k ++ ; } if ( k == 31 ) { throw new ArithmeticException ( "overflow: gcd is 2^31" ) ; } int t = ( ( u & 1 ) == 1 ) ? v : - ( u / 2 ) ; do { while ( ( t & 1 ) == 0 ) { t /= 2 ; } if ( t > 0 ) { u = - t ; } else { v = t ; } t = ( v - u ) / 2 ; } while ( t != 0 ) ; return - u * ( 1 << k ) ; }
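A minimal standalone demonstration (a hypothetical demo class, not part of the record) of why the original zero test misfires; the values mirror the assertion quoted in the issue:

```java
public class GcdOverflowDemo {
    public static void main(String[] args) {
        int u = 3 * (1 << 20); // 3 * 2^20 = 3145728
        int v = 9 * (1 << 15); // 9 * 2^15 = 294912
        // The true product is 27 * 2^35, and 2^35 is 0 modulo 2^32,
        // so the 32-bit multiplication wraps around to exactly 0.
        System.out.println(u * v);            // 0
        System.out.println(u * v == 0);       // true  -> buggy guard returns |u| + |v| = 3440640
        System.out.println(u == 0 || v == 0); // false -> fixed guard computes gcd = 3 * 2^15 = 98304
    }
}
```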
Compress
23
src/main/java/org/apache/commons/compress/archivers/sevenz/Coders.java
106
118
7z: 16 MB dictionary is too big
I created an archive with 7zip 9.20 containing the compress-1.7-src directory. I also tried it with the 1.6 version and directory. I downloaded the zip file and re-zipped it as 7z. The standard settings were used: Compression level: normal Compression method: lzma2 Dictionary size: 16 MB Word size: 32 Solid Block size: 2 GB I get an exception if I try to open the file with the simple line of code: SevenZFile input = new SevenZFile(new File(arcName)); Maybe it is a bug in the tukaani library, but I do not know how to report it to them. The exception thrown: org.tukaani.xz.UnsupportedOptionsException: LZMA dictionary is too big for this implementation at org.tukaani.xz.LZMAInputStream.initialize(Unknown Source) at org.tukaani.xz.LZMAInputStream.<init>(Unknown Source) at org.apache.commons.compress.archivers.sevenz.Coders$LZMADecoder.decode(Coders.java:117) at org.apache.commons.compress.archivers.sevenz.Coders.addDecoder(Coders.java:48) at org.apache.commons.compress.archivers.sevenz.SevenZFile.readEncodedHeader(SevenZFile.java:278) at org.apache.commons.compress.archivers.sevenz.SevenZFile.readHeaders(SevenZFile.java:190) at org.apache.commons.compress.archivers.sevenz.SevenZFile.<init>(SevenZFile.java:94) at org.apache.commons.compress.archivers.sevenz.SevenZFile.<init>(SevenZFile.java:116) at compress.SevenZipError.main(SevenZipError.java:28)
@Override InputStream decode(final InputStream in, final Coder coder, byte[] password) throws IOException { byte propsByte = coder.properties[0]; long dictSize = coder.properties[1]; for (int i = 1; i < 4; i++) { dictSize |= (coder.properties[i + 1] << (8 * i)); } if (dictSize > LZMAInputStream.DICT_SIZE_MAX) { throw new IOException("Dictionary larger than 4GiB maximum size"); } return new LZMAInputStream(in, -1, propsByte, (int) dictSize); }
@ Override InputStream decode ( final InputStream in , final Coder coder , byte [ ] password ) throws IOException { byte propsByte = coder . properties [ 0 ] ; long dictSize = coder . properties [ 1 ] ; for ( int i = 1 ; i < 4 ; i ++ ) { dictSize |= ( coder . properties [ i + 1 ] << ( 8 * i ) ) ; } if ( dictSize > LZMAInputStream . DICT_SIZE_MAX ) { throw new IOException ( "Dictionary larger than 4GiB maximum size" ) ; } return new LZMAInputStream ( in , - 1 , propsByte , ( int ) dictSize ) ; }
@Override InputStream decode(final InputStream in, final Coder coder, byte[] password) throws IOException { byte propsByte = coder.properties[0]; long dictSize = coder.properties[1]; for (int i = 1; i < 4; i++) { dictSize |= (coder.properties[i + 1] & 0xffl) << (8 * i); } if (dictSize > LZMAInputStream.DICT_SIZE_MAX) { throw new IOException("Dictionary larger than 4GiB maximum size"); } return new LZMAInputStream(in, -1, propsByte, (int) dictSize); }
@ Override InputStream decode ( final InputStream in , final Coder coder , byte [ ] password ) throws IOException { byte propsByte = coder . properties [ 0 ] ; long dictSize = coder . properties [ 1 ] ; for ( int i = 1 ; i < 4 ; i ++ ) { dictSize |= ( coder . properties [ i + 1 ] & 0xffl ) << ( 8 * i ) ; } if ( dictSize > LZMAInputStream . DICT_SIZE_MAX ) { throw new IOException ( "Dictionary larger than 4GiB maximum size" ) ; } return new LZMAInputStream ( in , - 1 , propsByte , ( int ) dictSize ) ; }
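A hedged sketch of the sign-extension pitfall behind the fix, using hypothetical LZMA property bytes (any dictionary-size byte of 0x80 or above triggers it):

```java
public class LzmaDictSizeDemo {
    public static void main(String[] args) {
        // One props byte plus a little-endian dictionary size of 0x00800000 (8 MiB).
        byte[] properties = { 0x5d, 0x00, 0x00, (byte) 0x80, 0x00 };
        long buggy = properties[1];
        long fixed = properties[1];
        for (int i = 1; i < 4; i++) {
            buggy |= (properties[i + 1] << (8 * i));          // byte sign-extends to int before the shift
            fixed |= (properties[i + 1] & 0xffL) << (8 * i);  // mask to unsigned first, then shift
        }
        System.out.println(buggy); // -8388608: sign bits of 0x80 flooded the upper half of the long
        System.out.println(fixed); // 8388608: the intended unsigned dictionary size
    }
}
```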
Compress
35
src/main/java/org/apache/commons/compress/archivers/tar/TarUtils.java
593
613
TAR checksum fails when checksum is right aligned
The linked TAR has a checksum with zero padding on the left instead of the expected {{NULL-SPACE}} terminator on the right. As a result the last two digits of the stored checksum are lost and the otherwise valid checksum is treated as invalid. Given that the code already checks for digits being in range before adding them to the stored sum, is it necessary to only look at the first 6 octal digits instead of the whole field?
public static boolean verifyCheckSum(byte[] header) { long storedSum = 0; long unsignedSum = 0; long signedSum = 0; int digits = 0; for (int i = 0; i < header.length; i++) { byte b = header[i]; if (CHKSUM_OFFSET <= i && i < CHKSUM_OFFSET + CHKSUMLEN) { if ('0' <= b && b <= '7' && digits++ < 6) { storedSum = storedSum * 8 + b - '0'; } else if (digits > 0) { digits = 6; } b = ' '; } unsignedSum += 0xff & b; signedSum += b; } return storedSum == unsignedSum || storedSum == signedSum; }
public static boolean verifyCheckSum ( byte [ ] header ) { long storedSum = 0 ; long unsignedSum = 0 ; long signedSum = 0 ; int digits = 0 ; for ( int i = 0 ; i < header . length ; i ++ ) { byte b = header [ i ] ; if ( CHKSUM_OFFSET <= i && i < CHKSUM_OFFSET + CHKSUMLEN ) { if ( '0' <= b && b <= '7' && digits ++ < 6 ) { storedSum = storedSum * 8 + b - '0' ; } else if ( digits > 0 ) { digits = 6 ; } b = ' ' ; } unsignedSum += 0xff & b ; signedSum += b ; } return storedSum == unsignedSum || storedSum == signedSum ; }
public static boolean verifyCheckSum(byte[] header) { long storedSum = parseOctal(header, CHKSUM_OFFSET, CHKSUMLEN); long unsignedSum = 0; long signedSum = 0; int digits = 0; for (int i = 0; i < header.length; i++) { byte b = header[i]; if (CHKSUM_OFFSET <= i && i < CHKSUM_OFFSET + CHKSUMLEN) { b = ' '; } unsignedSum += 0xff & b; signedSum += b; } return storedSum == unsignedSum || storedSum == signedSum; }
public static boolean verifyCheckSum ( byte [ ] header ) { long storedSum = parseOctal ( header , CHKSUM_OFFSET , CHKSUMLEN ) ; long unsignedSum = 0 ; long signedSum = 0 ; int digits = 0 ; for ( int i = 0 ; i < header . length ; i ++ ) { byte b = header [ i ] ; if ( CHKSUM_OFFSET <= i && i < CHKSUM_OFFSET + CHKSUMLEN ) { b = ' ' ; } unsignedSum += 0xff & b ; signedSum += b ; } return storedSum == unsignedSum || storedSum == signedSum ; }
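A small standalone sketch of the parsing difference, assuming a hypothetical zero-padded 8-byte checksum field of the kind described in the report:

```java
import java.nio.charset.StandardCharsets;

public class TarChecksumDemo {
    public static void main(String[] args) {
        // Zero-padded on the left, no NUL/space terminator: "00012717" (octal).
        byte[] field = "00012717".getBytes(StandardCharsets.US_ASCII);
        long truncated = 0;
        int digits = 0;
        for (byte b : field) {
            if ('0' <= b && b <= '7' && digits++ < 6) { // old code: stop after six digits
                truncated = truncated * 8 + b - '0';
            }
        }
        long full = 0;
        for (byte b : field) { // the fix parses the whole field (via parseOctal)
            if ('0' <= b && b <= '7') {
                full = full * 8 + b - '0';
            }
        }
        System.out.println(truncated); // 87   (octal 000127: the last two digits are lost)
        System.out.println(full);      // 5583 (octal 00012717)
    }
}
```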
Codec
6
src/java/org/apache/commons/codec/binary/Base64InputStream.java
138
180
Base64InputStream#read(byte[]) incorrectly returns 0 at end of any stream which is a multiple of 3 bytes long
Using new InputStreamReader(new Base64InputStream(in, true)) sometimes fails with "java.io.IOException: Underlying input stream returned zero bytes". This has been tracked down to Base64InputStream#read(byte[]) incorrectly returning 0 at the end of any stream whose length is a multiple of 3 bytes.
public int read(byte b[], int offset, int len) throws IOException { if (b == null) { throw new NullPointerException(); } else if (offset < 0 || len < 0) { throw new IndexOutOfBoundsException(); } else if (offset > b.length || offset + len > b.length) { throw new IndexOutOfBoundsException(); } else if (len == 0) { return 0; } else { /* Rationale for while-loop on (readLen == 0): ----- Base64.readResults() usually returns > 0 or EOF (-1). In the rare case where it returns 0, we just keep trying. This is essentially an undocumented contract for InputStream implementors that want their code to work properly with java.io.InputStreamReader, since the latter hates it when InputStream.read(byte[]) returns a zero. Unfortunately our readResults() call must return 0 if a large amount of the data being decoded was non-base64, so this while-loop enables proper interop with InputStreamReader for that scenario. ----- This is a fix for CODEC-101 */ if (!base64.hasData()) { byte[] buf = new byte[doEncode ? 4096 : 8192]; int c = in.read(buf); // A little optimization to avoid System.arraycopy() // when possible. if (c > 0 && b.length == len) { base64.setInitialBuffer(b, offset, len); } if (doEncode) { base64.encode(buf, 0, c); } else { base64.decode(buf, 0, c); } } return base64.readResults(b, offset, len); } }
public int read ( byte b [ ] , int offset , int len ) throws IOException { if ( b == null ) { throw new NullPointerException ( ) ; } else if ( offset < 0 || len < 0 ) { throw new IndexOutOfBoundsException ( ) ; } else if ( offset > b . length || offset + len > b . length ) { throw new IndexOutOfBoundsException ( ) ; } else if ( len == 0 ) { return 0 ; } else { if ( ! base64 . hasData ( ) ) { byte [ ] buf = new byte [ doEncode ? 4096 : 8192 ] ; int c = in . read ( buf ) ; if ( c > 0 && b . length == len ) { base64 . setInitialBuffer ( b , offset , len ) ; } if ( doEncode ) { base64 . encode ( buf , 0 , c ) ; } else { base64 . decode ( buf , 0 , c ) ; } } return base64 . readResults ( b , offset , len ) ; } }
public int read(byte b[], int offset, int len) throws IOException { if (b == null) { throw new NullPointerException(); } else if (offset < 0 || len < 0) { throw new IndexOutOfBoundsException(); } else if (offset > b.length || offset + len > b.length) { throw new IndexOutOfBoundsException(); } else if (len == 0) { return 0; } else { int readLen = 0; /* Rationale for while-loop on (readLen == 0): ----- Base64.readResults() usually returns > 0 or EOF (-1). In the rare case where it returns 0, we just keep trying. This is essentially an undocumented contract for InputStream implementors that want their code to work properly with java.io.InputStreamReader, since the latter hates it when InputStream.read(byte[]) returns a zero. Unfortunately our readResults() call must return 0 if a large amount of the data being decoded was non-base64, so this while-loop enables proper interop with InputStreamReader for that scenario. ----- This is a fix for CODEC-101 */ while (readLen == 0) { if (!base64.hasData()) { byte[] buf = new byte[doEncode ? 4096 : 8192]; int c = in.read(buf); // A little optimization to avoid System.arraycopy() // when possible. if (c > 0 && b.length == len) { base64.setInitialBuffer(b, offset, len); } if (doEncode) { base64.encode(buf, 0, c); } else { base64.decode(buf, 0, c); } } readLen = base64.readResults(b, offset, len); } return readLen; } }
public int read ( byte b [ ] , int offset , int len ) throws IOException { if ( b == null ) { throw new NullPointerException ( ) ; } else if ( offset < 0 || len < 0 ) { throw new IndexOutOfBoundsException ( ) ; } else if ( offset > b . length || offset + len > b . length ) { throw new IndexOutOfBoundsException ( ) ; } else if ( len == 0 ) { return 0 ; } else { int readLen = 0 ; while ( readLen == 0 ) { if ( ! base64 . hasData ( ) ) { byte [ ] buf = new byte [ doEncode ? 4096 : 8192 ] ; int c = in . read ( buf ) ; if ( c > 0 && b . length == len ) { base64 . setInitialBuffer ( b , offset , len ) ; } if ( doEncode ) { base64 . encode ( buf , 0 , c ) ; } else { base64 . decode ( buf , 0 , c ) ; } } readLen = base64 . readResults ( b , offset , len ) ; } return readLen ; } }
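To see why a zero return breaks InputStreamReader, here is a minimal sketch with a deliberately misbehaving stream; the exact exception message is an implementation detail of the JDK's StreamDecoder:

```java
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;

public class ZeroReadDemo {
    public static void main(String[] args) throws Exception {
        InputStream misbehaving = new InputStream() {
            private boolean first = true;
            @Override public int read() { return -1; }
            @Override public int read(byte[] b, int off, int len) {
                if (first) { first = false; return 0; } // the contract violation the fix removes
                return -1;
            }
        };
        try {
            new InputStreamReader(misbehaving, "US-ASCII").read();
        } catch (IOException e) {
            System.out.println(e.getMessage()); // e.g. "Underlying input stream returned zero bytes"
        }
    }
}
```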
JxPath
6
src/java/org/apache/commons/jxpath/ri/compiler/CoreOperationCompare.java
45
83
equality test for multi-valued variables does not conform to spec
Given, e.g., variable d={"a", "b"}, the spec implies that "$d = 'a'" and that "$d = 'b'". Instead of iterating the variable's components, its immediate content (here, the String[]) is compared, causing the aforementioned assertions to fail.
protected boolean equal( EvalContext context, Expression left, Expression right) { Object l = left.compute(context); Object r = right.compute(context); // System.err.println("COMPARING: " + // (l == null ? "null" : l.getClass().getName()) + " " + // (r == null ? "null" : r.getClass().getName())); if (l instanceof InitialContext || l instanceof SelfContext) { l = ((EvalContext) l).getSingleNodePointer(); } if (r instanceof InitialContext || r instanceof SelfContext) { r = ((EvalContext) r).getSingleNodePointer(); } if (l instanceof Collection) { l = ((Collection) l).iterator(); } if (r instanceof Collection) { r = ((Collection) r).iterator(); } if ((l instanceof Iterator) && !(r instanceof Iterator)) { return contains((Iterator) l, r); } if (!(l instanceof Iterator) && (r instanceof Iterator)) { return contains((Iterator) r, l); } if (l instanceof Iterator && r instanceof Iterator) { return findMatch((Iterator) l, (Iterator) r); } return equal(l, r); }
protected boolean equal ( EvalContext context , Expression left , Expression right ) { Object l = left . compute ( context ) ; Object r = right . compute ( context ) ; if ( l instanceof InitialContext || l instanceof SelfContext ) { l = ( ( EvalContext ) l ) . getSingleNodePointer ( ) ; } if ( r instanceof InitialContext || r instanceof SelfContext ) { r = ( ( EvalContext ) r ) . getSingleNodePointer ( ) ; } if ( l instanceof Collection ) { l = ( ( Collection ) l ) . iterator ( ) ; } if ( r instanceof Collection ) { r = ( ( Collection ) r ) . iterator ( ) ; } if ( ( l instanceof Iterator ) && ! ( r instanceof Iterator ) ) { return contains ( ( Iterator ) l , r ) ; } if ( ! ( l instanceof Iterator ) && ( r instanceof Iterator ) ) { return contains ( ( Iterator ) r , l ) ; } if ( l instanceof Iterator && r instanceof Iterator ) { return findMatch ( ( Iterator ) l , ( Iterator ) r ) ; } return equal ( l , r ) ; }
protected boolean equal( EvalContext context, Expression left, Expression right) { Object l = left.compute(context); Object r = right.compute(context); // System.err.println("COMPARING: " + // (l == null ? "null" : l.getClass().getName()) + " " + // (r == null ? "null" : r.getClass().getName())); if (l instanceof InitialContext) { ((EvalContext) l).reset(); } if (l instanceof SelfContext) { l = ((EvalContext) l).getSingleNodePointer(); } if (r instanceof InitialContext) { ((EvalContext) r).reset(); } if (r instanceof SelfContext) { r = ((EvalContext) r).getSingleNodePointer(); } if (l instanceof Collection) { l = ((Collection) l).iterator(); } if (r instanceof Collection) { r = ((Collection) r).iterator(); } if ((l instanceof Iterator) && !(r instanceof Iterator)) { return contains((Iterator) l, r); } if (!(l instanceof Iterator) && (r instanceof Iterator)) { return contains((Iterator) r, l); } if (l instanceof Iterator && r instanceof Iterator) { return findMatch((Iterator) l, (Iterator) r); } return equal(l, r); }
protected boolean equal ( EvalContext context , Expression left , Expression right ) { Object l = left . compute ( context ) ; Object r = right . compute ( context ) ; if ( l instanceof InitialContext ) { ( ( EvalContext ) l ) . reset ( ) ; } if ( l instanceof SelfContext ) { l = ( ( EvalContext ) l ) . getSingleNodePointer ( ) ; } if ( r instanceof InitialContext ) { ( ( EvalContext ) r ) . reset ( ) ; } if ( r instanceof SelfContext ) { r = ( ( EvalContext ) r ) . getSingleNodePointer ( ) ; } if ( l instanceof Collection ) { l = ( ( Collection ) l ) . iterator ( ) ; } if ( r instanceof Collection ) { r = ( ( Collection ) r ) . iterator ( ) ; } if ( ( l instanceof Iterator ) && ! ( r instanceof Iterator ) ) { return contains ( ( Iterator ) l , r ) ; } if ( ! ( l instanceof Iterator ) && ( r instanceof Iterator ) ) { return contains ( ( Iterator ) r , l ) ; } if ( l instanceof Iterator && r instanceof Iterator ) { return findMatch ( ( Iterator ) l , ( Iterator ) r ) ; } return equal ( l , r ) ; }
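A hedged usage sketch (assuming commons-jxpath on the classpath) of the spec behavior the fix restores:

```java
import org.apache.commons.jxpath.JXPathContext;

public class MultiValueEqualityDemo {
    public static void main(String[] args) {
        JXPathContext ctx = JXPathContext.newContext(new Object());
        ctx.getVariables().declareVariable("d", new String[] { "a", "b" });
        // Per the XPath spec, a set compares equal to a value if ANY member matches:
        System.out.println(ctx.getValue("$d = 'a'")); // expected true
        System.out.println(ctx.getValue("$d = 'b'")); // expected true; the report's failing assertion
    }
}
```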
JacksonXml
5
src/main/java/com/fasterxml/jackson/dataformat/xml/ser/XmlSerializerProvider.java
55
60
`@JacksonXmlRootElement` malfunction when using it with multiple `XmlMapper`s and disabling annotations
Found this in version 2.9.4 running some tests that go back and forth serializing with an XML mapper that uses annotations, and another one that ignores them. May be related to issue #171 and the cache of class annotations. When running this code, the second print statement should use the annotation's localName but it instead uses the class name. ``` @JacksonXmlRootElement(localName = "myname") public class XMLTest { public static void main(String[] s) throws Exception { final ObjectMapper xmlMapper = new XmlMapper(); final ObjectMapper noAnnotationsXmlMapper = xmlMapper.copy() .configure(MapperFeature.USE_ANNOTATIONS, false) .configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); System.out.println(noAnnotationsXmlMapper.writeValueAsString(new XMLTest())); System.out.println(xmlMapper.writeValueAsString(new XMLTest())); } } ``` Output: ``` <XMLTest/> <XMLTest/> ```
protected XmlSerializerProvider(XmlSerializerProvider src) { super(src); // 21-May-2018, tatu: As per [dataformat-xml#282], should NOT really copy // root name lookup as that may link back to diff version, configuration _rootNameLookup = src._rootNameLookup; }
protected XmlSerializerProvider ( XmlSerializerProvider src ) { super ( src ) ; _rootNameLookup = src . _rootNameLookup ; }
protected XmlSerializerProvider(XmlSerializerProvider src) { super(src); // 21-May-2018, tatu: As per [dataformat-xml#282], should NOT really copy // root name lookup as that may link back to diff version, configuration _rootNameLookup = new XmlRootNameLookup(); }
protected XmlSerializerProvider ( XmlSerializerProvider src ) { super ( src ) ; _rootNameLookup = new XmlRootNameLookup ( ) ; }
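A hedged sketch of the failure mechanism: the root-name lookup is a per-type cache, so whichever mapper resolves a type first poisons a cache shared with its copy; a fresh lookup per provider (as in the fix) removes the coupling. The class below is a stand-in, not the real XmlRootNameLookup:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class SharedCacheDemo {
    static final class RootNameLookup { // stand-in: caches a computed root name per type
        private final Map<Class<?>, String> cache = new ConcurrentHashMap<>();
        String findRootName(Class<?> type, boolean useAnnotations) {
            return cache.computeIfAbsent(type,
                    t -> useAnnotations ? "myname" : t.getSimpleName());
        }
    }

    public static void main(String[] args) {
        RootNameLookup shared = new RootNameLookup();
        System.out.println(shared.findRootName(SharedCacheDemo.class, false)); // SharedCacheDemo
        System.out.println(shared.findRootName(SharedCacheDemo.class, true));  // SharedCacheDemo (stale)
        System.out.println(new RootNameLookup()
                .findRootName(SharedCacheDemo.class, true));                   // myname (fresh cache)
    }
}
```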
Math
82
src/main/java/org/apache/commons/math/optimization/linear/SimplexSolver.java
76
91
SimplexSolver not working as expected 2
SimplexSolver didn't find the optimal solution. Program for Lpsolve: ===================== /* Objective function */ max: +7 a +3 b; /* Constraints */ R1: +3 a -5 c <= 0; R2: +2 a -5 d <= 0; R3: +2 b -5 c <= 0; R4: +3 b -5 d <= 0; R5: +3 a +2 b <= 5; R6: +2 a +3 b <= 5; /* Variable bounds */ a <= 1; b <= 1; ===================== Results (correct): a = 1, b = 1, value = 10 Program for SimplexSolver: ===================== LinearObjectiveFunction kritFcia = new LinearObjectiveFunction(new double[]{7, 3, 0, 0}, 0); Collection<LinearConstraint> podmienky = new ArrayList<LinearConstraint>(); podmienky.add(new LinearConstraint(new double[]{1, 0, 0, 0}, Relationship.LEQ, 1)); podmienky.add(new LinearConstraint(new double[]{0, 1, 0, 0}, Relationship.LEQ, 1)); podmienky.add(new LinearConstraint(new double[]{3, 0, -5, 0}, Relationship.LEQ, 0)); podmienky.add(new LinearConstraint(new double[]{2, 0, 0, -5}, Relationship.LEQ, 0)); podmienky.add(new LinearConstraint(new double[]{0, 2, -5, 0}, Relationship.LEQ, 0)); podmienky.add(new LinearConstraint(new double[]{0, 3, 0, -5}, Relationship.LEQ, 0)); podmienky.add(new LinearConstraint(new double[]{3, 2, 0, 0}, Relationship.LEQ, 5)); podmienky.add(new LinearConstraint(new double[]{2, 3, 0, 0}, Relationship.LEQ, 5)); SimplexSolver solver = new SimplexSolver(); RealPointValuePair result = solver.optimize(kritFcia, podmienky, GoalType.MAXIMIZE, true); ===================== Results (incorrect): a = 1, b = 0.5, value = 8.5 P.S. I used the latest software from the repository (including MATH-286 fix).
private Integer getPivotRow(final int col, final SimplexTableau tableau) { double minRatio = Double.MAX_VALUE; Integer minRatioPos = null; for (int i = tableau.getNumObjectiveFunctions(); i < tableau.getHeight(); i++) { final double rhs = tableau.getEntry(i, tableau.getWidth() - 1); final double entry = tableau.getEntry(i, col); if (MathUtils.compareTo(entry, 0, epsilon) >= 0) { final double ratio = rhs / entry; if (ratio < minRatio) { minRatio = ratio; minRatioPos = i; } } } return minRatioPos; }
private Integer getPivotRow ( final int col , final SimplexTableau tableau ) { double minRatio = Double . MAX_VALUE ; Integer minRatioPos = null ; for ( int i = tableau . getNumObjectiveFunctions ( ) ; i < tableau . getHeight ( ) ; i ++ ) { final double rhs = tableau . getEntry ( i , tableau . getWidth ( ) - 1 ) ; final double entry = tableau . getEntry ( i , col ) ; if ( MathUtils . compareTo ( entry , 0 , epsilon ) >= 0 ) { final double ratio = rhs / entry ; if ( ratio < minRatio ) { minRatio = ratio ; minRatioPos = i ; } } } return minRatioPos ; }
private Integer getPivotRow(final int col, final SimplexTableau tableau) { double minRatio = Double.MAX_VALUE; Integer minRatioPos = null; for (int i = tableau.getNumObjectiveFunctions(); i < tableau.getHeight(); i++) { final double rhs = tableau.getEntry(i, tableau.getWidth() - 1); final double entry = tableau.getEntry(i, col); if (MathUtils.compareTo(entry, 0, epsilon) > 0) { final double ratio = rhs / entry; if (ratio < minRatio) { minRatio = ratio; minRatioPos = i; } } } return minRatioPos; }
private Integer getPivotRow ( final int col , final SimplexTableau tableau ) { double minRatio = Double . MAX_VALUE ; Integer minRatioPos = null ; for ( int i = tableau . getNumObjectiveFunctions ( ) ; i < tableau . getHeight ( ) ; i ++ ) { final double rhs = tableau . getEntry ( i , tableau . getWidth ( ) - 1 ) ; final double entry = tableau . getEntry ( i , col ) ; if ( MathUtils . compareTo ( entry , 0 , epsilon ) > 0 ) { final double ratio = rhs / entry ; if ( ratio < minRatio ) { minRatio = ratio ; minRatioPos = i ; } } } return minRatioPos ; }
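A hedged sketch of why the comparison had to become strict: an entry that is zero only within epsilon can be a tiny negative number, and its ratio then wrongly wins the minimum-ratio test. compareTo below is a stand-in for MathUtils.compareTo:

```java
public class PivotRowEpsilonDemo {
    static int compareTo(double x, double y, double eps) { // stand-in: 0 when |x - y| <= eps
        if (Math.abs(x - y) <= eps) return 0;
        return x < y ? -1 : 1;
    }

    public static void main(String[] args) {
        double eps = 1e-6;
        double rhs = 5.0;
        double entry = -1e-9; // zero within epsilon, but numerically negative
        System.out.println(compareTo(entry, 0, eps) >= 0); // true  -> old test admits the row
        System.out.println(compareTo(entry, 0, eps) > 0);  // false -> fixed test rejects it
        System.out.println(rhs / entry); // -5.0E9: a hugely negative ratio that would win the minimum
    }
}
```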
Csv
3
src/main/java/org/apache/commons/csv/Lexer.java
87
114
Unescape handling needs rethinking
The current escape parsing converts <esc><char> to plain <char> if the <char> is not one of the special characters to be escaped. This can affect unicode escapes if the <esc> character is backslash. One way round this is to specifically check for <char> == 'u', but it seems wrong to only do this for 'u'. Another solution would be to leave <esc><char> as is unless the <char> is one of the special characters. There are several possible ways to treat unrecognised escapes: - treat it as if the escape char had not been present (current behaviour) - leave the escape char as is - throw an exception
int readEscape() throws IOException { // the escape char has just been read (normally a backslash) final int c = in.read(); switch (c) { case 'r': return CR; case 'n': return LF; case 't': return TAB; case 'b': return BACKSPACE; case 'f': return FF; case CR: case LF: case FF: // TODO is this correct? case TAB: // TODO is this correct? Do tabs need to be escaped? case BACKSPACE: // TODO is this correct? return c; case END_OF_STREAM: throw new IOException("EOF whilst processing escape sequence"); default: // Now check for meta-characters return c; // indicate unexpected char - available from in.getLastChar() } }
int readEscape ( ) throws IOException { final int c = in . read ( ) ; switch ( c ) { case 'r' : return CR ; case 'n' : return LF ; case 't' : return TAB ; case 'b' : return BACKSPACE ; case 'f' : return FF ; case CR : case LF : case FF : case TAB : case BACKSPACE : return c ; case END_OF_STREAM : throw new IOException ( "EOF whilst processing escape sequence" ) ; default : return c ; } }
int readEscape() throws IOException { // the escape char has just been read (normally a backslash) final int c = in.read(); switch (c) { case 'r': return CR; case 'n': return LF; case 't': return TAB; case 'b': return BACKSPACE; case 'f': return FF; case CR: case LF: case FF: // TODO is this correct? case TAB: // TODO is this correct? Do tabs need to be escaped? case BACKSPACE: // TODO is this correct? return c; case END_OF_STREAM: throw new IOException("EOF whilst processing escape sequence"); default: // Now check for meta-characters if (isDelimiter(c) || isEscape(c) || isQuoteChar(c) || isCommentStart(c)) { return c; } // indicate unexpected char - available from in.getLastChar() return END_OF_STREAM; } }
int readEscape ( ) throws IOException { final int c = in . read ( ) ; switch ( c ) { case 'r' : return CR ; case 'n' : return LF ; case 't' : return TAB ; case 'b' : return BACKSPACE ; case 'f' : return FF ; case CR : case LF : case FF : case TAB : case BACKSPACE : return c ; case END_OF_STREAM : throw new IOException ( "EOF whilst processing escape sequence" ) ; default : if ( isDelimiter ( c ) || isEscape ( c ) || isQuoteChar ( c ) || isCommentStart ( c ) ) { return c ; } return END_OF_STREAM ; } }
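A compact sketch of the default-branch policy change, with a hypothetical stand-in for the lexer's isDelimiter/isEscape/isQuoteChar/isCommentStart predicates:

```java
public class EscapePolicyDemo {
    private static final int END_OF_STREAM = -1;
    static boolean isMetaChar(int c) { return c == ',' || c == '\\' || c == '"' || c == '#'; }

    static int readEscapeOld(int c)   { return c; }                                // any <esc><char> -> <char>
    static int readEscapeFixed(int c) { return isMetaChar(c) ? c : END_OF_STREAM; }

    public static void main(String[] args) {
        System.out.println((char) readEscapeOld('u'));   // u  : escape silently dropped, breaks "\u0041"
        System.out.println((char) readEscapeFixed(',')); // ,  : known special still unescapes
        System.out.println(readEscapeFixed('u'));        // -1 : unexpected escape is now signalled
    }
}
```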
Csv
10
src/main/java/org/apache/commons/csv/CSVPrinter.java
61
70
CSVFormat#withHeader doesn't work with CSVPrinter
In the current version [CSVFormat#withHeader|https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.html#withHeader(java.lang.String...)] is only used by CSVParser. It would be nice if CSVPrinter also supported it. Ideally, the following line of code {code:java} CSVPrinter csvPrinter = CSVFormat.TDF .withHeader("x") .print(Files.newBufferedWriter(Paths.get("data.csv"))); csvPrinter.printRecord(42); csvPrinter.close(); {code} should produce {code} x 42 {code} If you're alright with the idea of automatically inserting headers, I can attach a patch.
public CSVPrinter(final Appendable out, final CSVFormat format) throws IOException { Assertions.notNull(out, "out"); Assertions.notNull(format, "format"); this.out = out; this.format = format; this.format.validate(); // TODO: Is it a good idea to do this here instead of on the first call to a print method? // It seems a pain to have to track whether the header has already been printed or not. }
public CSVPrinter ( final Appendable out , final CSVFormat format ) throws IOException { Assertions . notNull ( out , "out" ) ; Assertions . notNull ( format , "format" ) ; this . out = out ; this . format = format ; this . format . validate ( ) ; }
public CSVPrinter(final Appendable out, final CSVFormat format) throws IOException { Assertions.notNull(out, "out"); Assertions.notNull(format, "format"); this.out = out; this.format = format; this.format.validate(); // TODO: Is it a good idea to do this here instead of on the first call to a print method? // It seems a pain to have to track whether the header has already been printed or not. if (format.getHeader() != null) { this.printRecord((Object[]) format.getHeader()); } }
public CSVPrinter ( final Appendable out , final CSVFormat format ) throws IOException { Assertions . notNull ( out , "out" ) ; Assertions . notNull ( format , "format" ) ; this . out = out ; this . format = format ; this . format . validate ( ) ; if ( format . getHeader ( ) != null ) { this . printRecord ( ( Object [ ] ) format . getHeader ( ) ) ; } }
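With the fix in place, the report's desired usage also works against an in-memory writer; a minimal sketch assuming commons-csv on the classpath:

```java
import java.io.StringWriter;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;

public class HeaderPrintDemo {
    public static void main(String[] args) throws Exception {
        StringWriter out = new StringWriter();
        CSVPrinter printer = CSVFormat.TDF.withHeader("x").print(out);
        printer.printRecord(42);
        printer.close();
        System.out.print(out); // header row "x", then "42" - the header is emitted at construction
    }
}
```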
Compress
15
src/main/java/org/apache/commons/compress/archivers/zip/ZipArchiveEntry.java
648
688
ZipArchiveInputStream and ZipFile don't produce equal ZipArchiveEntry instances
I'm trying to use a ZipArchiveEntry coming from ZipArchiveInputStream, stored somewhere for later use with a ZipFile, and it does not work. The reason is that it can't find the ZipArchiveEntry in the ZipFile entries map. It is exactly the same zip file, but the two entries are not equal, so the Map#get fails. As far as I can see the main difference is that {{comment}} is null in ZipArchiveInputStream while it's an empty string in ZipFile. I looked at ZipArchiveInputStream and it looks like the comment (whatever it is) is simply not parsed, while I can find some code related to the comment at the end of ZipFile#readCentralDirectoryEntry. Note that java.util.zip does not have this issue. I did not check what they do, but there the zip entries are equal.
@Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } ZipArchiveEntry other = (ZipArchiveEntry) obj; String myName = getName(); String otherName = other.getName(); if (myName == null) { if (otherName != null) { return false; } } else if (!myName.equals(otherName)) { return false; } String myComment = getComment(); String otherComment = other.getComment(); if (myComment == null) { if (otherComment != null) { return false; } } else if (!myComment.equals(otherComment)) { return false; } return getTime() == other.getTime() && getInternalAttributes() == other.getInternalAttributes() && getPlatform() == other.getPlatform() && getExternalAttributes() == other.getExternalAttributes() && getMethod() == other.getMethod() && getSize() == other.getSize() && getCrc() == other.getCrc() && getCompressedSize() == other.getCompressedSize() && Arrays.equals(getCentralDirectoryExtra(), other.getCentralDirectoryExtra()) && Arrays.equals(getLocalFileDataExtra(), other.getLocalFileDataExtra()) && gpb.equals(other.gpb); }
@ Override public boolean equals ( Object obj ) { if ( this == obj ) { return true ; } if ( obj == null || getClass ( ) != obj . getClass ( ) ) { return false ; } ZipArchiveEntry other = ( ZipArchiveEntry ) obj ; String myName = getName ( ) ; String otherName = other . getName ( ) ; if ( myName == null ) { if ( otherName != null ) { return false ; } } else if ( ! myName . equals ( otherName ) ) { return false ; } String myComment = getComment ( ) ; String otherComment = other . getComment ( ) ; if ( myComment == null ) { if ( otherComment != null ) { return false ; } } else if ( ! myComment . equals ( otherComment ) ) { return false ; } return getTime ( ) == other . getTime ( ) && getInternalAttributes ( ) == other . getInternalAttributes ( ) && getPlatform ( ) == other . getPlatform ( ) && getExternalAttributes ( ) == other . getExternalAttributes ( ) && getMethod ( ) == other . getMethod ( ) && getSize ( ) == other . getSize ( ) && getCrc ( ) == other . getCrc ( ) && getCompressedSize ( ) == other . getCompressedSize ( ) && Arrays . equals ( getCentralDirectoryExtra ( ) , other . getCentralDirectoryExtra ( ) ) && Arrays . equals ( getLocalFileDataExtra ( ) , other . getLocalFileDataExtra ( ) ) && gpb . equals ( other . gpb ) ; }
@Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } ZipArchiveEntry other = (ZipArchiveEntry) obj; String myName = getName(); String otherName = other.getName(); if (myName == null) { if (otherName != null) { return false; } } else if (!myName.equals(otherName)) { return false; } String myComment = getComment(); String otherComment = other.getComment(); if (myComment == null) { myComment = ""; } if (otherComment == null) { otherComment = ""; } return getTime() == other.getTime() && myComment.equals(otherComment) && getInternalAttributes() == other.getInternalAttributes() && getPlatform() == other.getPlatform() && getExternalAttributes() == other.getExternalAttributes() && getMethod() == other.getMethod() && getSize() == other.getSize() && getCrc() == other.getCrc() && getCompressedSize() == other.getCompressedSize() && Arrays.equals(getCentralDirectoryExtra(), other.getCentralDirectoryExtra()) && Arrays.equals(getLocalFileDataExtra(), other.getLocalFileDataExtra()) && gpb.equals(other.gpb); }
@ Override public boolean equals ( Object obj ) { if ( this == obj ) { return true ; } if ( obj == null || getClass ( ) != obj . getClass ( ) ) { return false ; } ZipArchiveEntry other = ( ZipArchiveEntry ) obj ; String myName = getName ( ) ; String otherName = other . getName ( ) ; if ( myName == null ) { if ( otherName != null ) { return false ; } } else if ( ! myName . equals ( otherName ) ) { return false ; } String myComment = getComment ( ) ; String otherComment = other . getComment ( ) ; if ( myComment == null ) { myComment = "" ; } if ( otherComment == null ) { otherComment = "" ; } return getTime ( ) == other . getTime ( ) && myComment . equals ( otherComment ) && getInternalAttributes ( ) == other . getInternalAttributes ( ) && getPlatform ( ) == other . getPlatform ( ) && getExternalAttributes ( ) == other . getExternalAttributes ( ) && getMethod ( ) == other . getMethod ( ) && getSize ( ) == other . getSize ( ) && getCrc ( ) == other . getCrc ( ) && getCompressedSize ( ) == other . getCompressedSize ( ) && Arrays . equals ( getCentralDirectoryExtra ( ) , other . getCentralDirectoryExtra ( ) ) && Arrays . equals ( getLocalFileDataExtra ( ) , other . getLocalFileDataExtra ( ) ) && gpb . equals ( other . gpb ) ; }
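The core of the fix is a null-to-empty normalization of the comment before comparing; a trivial sketch:

```java
public class CommentNormalizationDemo {
    public static void main(String[] args) {
        String fromStream = null; // ZipArchiveInputStream never parses a comment
        String fromFile = "";     // ZipFile reads an empty comment from the central directory
        String a = (fromStream == null) ? "" : fromStream;
        String b = (fromFile == null) ? "" : fromFile;
        System.out.println(a.equals(b)); // true: the two entries can now compare equal
    }
}
```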
JacksonDatabind
28
src/main/java/com/fasterxml/jackson/databind/deser/std/JsonNodeDeserializer.java
94
107
Deserialization from "{}" to ObjectNode field causes "out of END_OBJECT token" error
I found that deserializing from an empty object (`{}`) to an ObjectNode field in a class fails. Here is the minimal code to reproduce: ``` java public class Main { public static class MyValue { private final ObjectNode object; @JsonCreator public MyValue(ObjectNode object) { this.object = object; } @JsonValue public ObjectNode getObject() { return object; } } public static void main(String[] args) throws Exception { ObjectMapper om = new ObjectMapper(); ObjectNode object = new ObjectNode(JsonNodeFactory.instance); String json = om.writeValueAsString(object); System.out.println("json: "+json); ObjectNode de1 = om.readValue(json, ObjectNode.class); // this works System.out.println("Deserialized to ObjectNode: "+de1); MyValue de2 = om.readValue(json, MyValue.class); // but this throws exception System.out.println("Deserialized to MyValue: "+de2); } } ``` Result is: ``` json: {} Deserialized to ObjectNode: {} Exception in thread "main" com.fasterxml.jackson.databind.JsonMappingException: Can not deserialize instance of com.fasterxml.jackson.databind.node.ObjectNode out of END_OBJECT token at [Source: {}; line: 1, column: 2] at com.fasterxml.jackson.databind.JsonMappingException.from(JsonMappingException.java:148) at com.fasterxml.jackson.databind.DeserializationContext.mappingException(DeserializationContext.java:854) at com.fasterxml.jackson.databind.DeserializationContext.mappingException(DeserializationContext.java:850) at com.fasterxml.jackson.databind.deser.std.JsonNodeDeserializer$ObjectDeserializer.deserialize(JsonNodeDeserializer.java:104) at com.fasterxml.jackson.databind.deser.std.JsonNodeDeserializer$ObjectDeserializer.deserialize(JsonNodeDeserializer.java:83) at com.fasterxml.jackson.databind.deser.BeanDeserializerBase.deserializeFromObjectUsingNonDefault(BeanDeserializerBase.java:1095) at com.fasterxml.jackson.databind.deser.BeanDeserializer.deserializeFromObject(BeanDeserializer.java:294) at com.fasterxml.jackson.databind.deser.BeanDeserializer.deserialize(BeanDeserializer.java:131) at com.fasterxml.jackson.databind.ObjectMapper._readMapAndClose(ObjectMapper.java:3731) at com.fasterxml.jackson.databind.ObjectMapper.readValue(ObjectMapper.java:2724) at Main.main(Main.java:35) ``` If the object is not empty (e.g. `{"k":"v"}`), it works: ``` java ... ObjectNode object = new ObjectNode(JsonNodeFactory.instance); object.put("k", "v"); // added ... ``` ``` json: {"k":"v"} Deserialized to ObjectNode: {"k":"v"} Deserialized to MyValue: io.digdag.cli.Main$MyValue@17550481 ``` Environment: - jackson-core 2.6.2 - jackson-databind 2.6.2 - Java 8 (`Java(TM) SE Runtime Environment (build 1.8.0_20-b26)`)
@Override public ObjectNode deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { if (p.getCurrentToken() == JsonToken.START_OBJECT) { p.nextToken(); return deserializeObject(p, ctxt, ctxt.getNodeFactory()); } // 23-Sep-2015, tatu: Ugh. We may also be given END_OBJECT (similar to FIELD_NAME), // if caller has advanced to the first token of Object, but for empty Object if (p.getCurrentToken() == JsonToken.FIELD_NAME) { return deserializeObject(p, ctxt, ctxt.getNodeFactory()); } throw ctxt.mappingException(ObjectNode.class); }
@ Override public ObjectNode deserialize ( JsonParser p , DeserializationContext ctxt ) throws IOException { if ( p . getCurrentToken ( ) == JsonToken . START_OBJECT ) { p . nextToken ( ) ; return deserializeObject ( p , ctxt , ctxt . getNodeFactory ( ) ) ; } if ( p . getCurrentToken ( ) == JsonToken . FIELD_NAME ) { return deserializeObject ( p , ctxt , ctxt . getNodeFactory ( ) ) ; } throw ctxt . mappingException ( ObjectNode . class ) ; }
@Override public ObjectNode deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { if (p.isExpectedStartObjectToken() || p.hasToken(JsonToken.FIELD_NAME)) { return deserializeObject(p, ctxt, ctxt.getNodeFactory()); } // 23-Sep-2015, tatu: Ugh. We may also be given END_OBJECT (similar to FIELD_NAME), // if caller has advanced to the first token of Object, but for empty Object if (p.hasToken(JsonToken.END_OBJECT)) { return ctxt.getNodeFactory().objectNode(); } throw ctxt.mappingException(ObjectNode.class); }
@ Override public ObjectNode deserialize ( JsonParser p , DeserializationContext ctxt ) throws IOException { if ( p . isExpectedStartObjectToken ( ) || p . hasToken ( JsonToken . FIELD_NAME ) ) { return deserializeObject ( p , ctxt , ctxt . getNodeFactory ( ) ) ; } if ( p . hasToken ( JsonToken . END_OBJECT ) ) { return ctxt . getNodeFactory ( ) . objectNode ( ) ; } throw ctxt . mappingException ( ObjectNode . class ) ; }
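The root cause is a parser-position convention: a delegating creator hands over a parser already advanced past START_OBJECT, so for "{}" the current token is END_OBJECT, the one case the old code rejected. A minimal sketch of the token states (assuming jackson-core):

```java
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;

public class EmptyObjectTokenDemo {
    public static void main(String[] args) throws Exception {
        JsonParser p = new JsonFactory().createParser("{}");
        System.out.println(p.nextToken()); // START_OBJECT
        System.out.println(p.nextToken()); // END_OBJECT - what the deserializer sees for an empty object
    }
}
```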
JacksonCore
5
src/main/java/com/fasterxml/jackson/core/JsonPointer.java
185
205
An exception is thrown for a valid JsonPointer expression
The Json-Patch project leader has pointed out to me that there is a bug in the JsonPointer implementation, and I have decided to investigate. Basically, if you do something like `JsonPointer.compile("/1e0");` it throws a NumberFormatException, which should not happen. This is because of this piece of code: ``` java private final static int _parseInt(String str) { final int len = str.length(); if (len == 0) { return -1; } for (int i = 0; i < len; ++i) { char c = str.charAt(i++); if (c > '9' || c < '0') { return -1; } } // for now, we'll assume 32-bit indexes are fine return NumberInput.parseInt(str); } ``` When it finds digits it interprets the segment as an integer, but it should only do so when the whole segment is numeric. For this reason I think that the condition should be changed to the inverse condition (if it doesn't find any non-digit char then it is a number). If you want I can send you a PR as well. Alex.
private final static int _parseIndex(String str) { final int len = str.length(); // [Issue#133]: beware of super long indexes; assume we never // have arrays over 2 billion entries so ints are fine. if (len == 0 || len > 10) { return -1; } for (int i = 0; i < len; ++i) { char c = str.charAt(i++); if (c > '9' || c < '0') { return -1; } } if (len == 10) { long l = NumberInput.parseLong(str); if (l > Integer.MAX_VALUE) { return -1; } } return NumberInput.parseInt(str); }
private final static int _parseIndex ( String str ) { final int len = str . length ( ) ; if ( len == 0 || len > 10 ) { return - 1 ; } for ( int i = 0 ; i < len ; ++ i ) { char c = str . charAt ( i ++ ) ; if ( c > '9' || c < '0' ) { return - 1 ; } } if ( len == 10 ) { long l = NumberInput . parseLong ( str ) ; if ( l > Integer . MAX_VALUE ) { return - 1 ; } } return NumberInput . parseInt ( str ) ; }
private final static int _parseIndex(String str) { final int len = str.length(); // [Issue#133]: beware of super long indexes; assume we never // have arrays over 2 billion entries so ints are fine. if (len == 0 || len > 10) { return -1; } for (int i = 0; i < len; ++i) { char c = str.charAt(i); if (c > '9' || c < '0') { return -1; } } if (len == 10) { long l = NumberInput.parseLong(str); if (l > Integer.MAX_VALUE) { return -1; } } return NumberInput.parseInt(str); }
private final static int _parseIndex ( String str ) { final int len = str . length ( ) ; if ( len == 0 || len > 10 ) { return - 1 ; } for ( int i = 0 ; i < len ; ++ i ) { char c = str . charAt ( i ) ; if ( c > '9' || c < '0' ) { return - 1 ; } } if ( len == 10 ) { long l = NumberInput . parseLong ( str ) ; if ( l > Integer . MAX_VALUE ) { return - 1 ; } } return NumberInput . parseInt ( str ) ; }
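The defect is a double increment: `i` advances both in the loop header and in `charAt(i++)`, so every other character escapes the digit check. A standalone sketch:

```java
public class DoubleIncrementDemo {
    static boolean allDigitsBuggy(String str) {
        for (int i = 0; i < str.length(); ++i) {
            char c = str.charAt(i++); // i also advances here: only every other char is checked
            if (c > '9' || c < '0') return false;
        }
        return true;
    }

    static boolean allDigitsFixed(String str) {
        for (int i = 0; i < str.length(); ++i) {
            char c = str.charAt(i);
            if (c > '9' || c < '0') return false;
        }
        return true;
    }

    public static void main(String[] args) {
        // "1e0" inspects only indexes 0 and 2 ('1' and '0'), skipping the 'e':
        System.out.println(allDigitsBuggy("1e0")); // true  -> parseInt("1e0") then throws
        System.out.println(allDigitsFixed("1e0")); // false -> treated as a property name, as intended
    }
}
```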
JacksonDatabind
45
src/main/java/com/fasterxml/jackson/databind/ser/std/DateTimeSerializerBase.java
49
81
Fix for #1154
Fix for #1154. Partially rolls back to pre-#1111 behavior. We just make sure that the STRING shape is chosen when Shape.ANY (the default) is set on the annotation, but some other annotation attribute was also set (pattern, locale or timezone). This way of fixing the issue has the added benefit of respecting the user config regarding the default serialization of ~~strings~~ dates when @JsonFormat(shape = Shape.ANY) is set on a property.
@Override public JsonSerializer<?> createContextual(SerializerProvider serializers, BeanProperty property) throws JsonMappingException { if (property != null) { JsonFormat.Value format = serializers.getAnnotationIntrospector().findFormat((Annotated)property.getMember()); if (format != null) { // Simple case first: serialize as numeric timestamp? JsonFormat.Shape shape = format.getShape(); if (shape.isNumeric()) { return withFormat(Boolean.TRUE, null); } if (format.getShape() == JsonFormat.Shape.STRING) { TimeZone tz = format.getTimeZone(); final String pattern = format.hasPattern() ? format.getPattern() : StdDateFormat.DATE_FORMAT_STR_ISO8601; final Locale loc = format.hasLocale() ? format.getLocale() : serializers.getLocale(); SimpleDateFormat df = new SimpleDateFormat(pattern, loc); if (tz == null) { tz = serializers.getTimeZone(); } df.setTimeZone(tz); return withFormat(Boolean.FALSE, df); } } } return this; }
@ Override public JsonSerializer < ? > createContextual ( SerializerProvider serializers , BeanProperty property ) throws JsonMappingException { if ( property != null ) { JsonFormat . Value format = serializers . getAnnotationIntrospector ( ) . findFormat ( ( Annotated ) property . getMember ( ) ) ; if ( format != null ) { JsonFormat . Shape shape = format . getShape ( ) ; if ( shape . isNumeric ( ) ) { return withFormat ( Boolean . TRUE , null ) ; } if ( format . getShape ( ) == JsonFormat . Shape . STRING ) { TimeZone tz = format . getTimeZone ( ) ; final String pattern = format . hasPattern ( ) ? format . getPattern ( ) : StdDateFormat . DATE_FORMAT_STR_ISO8601 ; final Locale loc = format . hasLocale ( ) ? format . getLocale ( ) : serializers . getLocale ( ) ; SimpleDateFormat df = new SimpleDateFormat ( pattern , loc ) ; if ( tz == null ) { tz = serializers . getTimeZone ( ) ; } df . setTimeZone ( tz ) ; return withFormat ( Boolean . FALSE , df ) ; } } } return this ; }
@Override public JsonSerializer<?> createContextual(SerializerProvider serializers, BeanProperty property) throws JsonMappingException { if (property != null) { JsonFormat.Value format = serializers.getAnnotationIntrospector().findFormat((Annotated)property.getMember()); if (format != null) { // Simple case first: serialize as numeric timestamp? JsonFormat.Shape shape = format.getShape(); if (shape.isNumeric()) { return withFormat(Boolean.TRUE, null); } if ((shape == JsonFormat.Shape.STRING) || format.hasPattern() || format.hasLocale() || format.hasTimeZone()) { TimeZone tz = format.getTimeZone(); final String pattern = format.hasPattern() ? format.getPattern() : StdDateFormat.DATE_FORMAT_STR_ISO8601; final Locale loc = format.hasLocale() ? format.getLocale() : serializers.getLocale(); SimpleDateFormat df = new SimpleDateFormat(pattern, loc); if (tz == null) { tz = serializers.getTimeZone(); } df.setTimeZone(tz); return withFormat(Boolean.FALSE, df); } } } return this; }
@ Override public JsonSerializer < ? > createContextual ( SerializerProvider serializers , BeanProperty property ) throws JsonMappingException { if ( property != null ) { JsonFormat . Value format = serializers . getAnnotationIntrospector ( ) . findFormat ( ( Annotated ) property . getMember ( ) ) ; if ( format != null ) { JsonFormat . Shape shape = format . getShape ( ) ; if ( shape . isNumeric ( ) ) { return withFormat ( Boolean . TRUE , null ) ; } if ( ( shape == JsonFormat . Shape . STRING ) || format . hasPattern ( ) || format . hasLocale ( ) || format . hasTimeZone ( ) ) { TimeZone tz = format . getTimeZone ( ) ; final String pattern = format . hasPattern ( ) ? format . getPattern ( ) : StdDateFormat . DATE_FORMAT_STR_ISO8601 ; final Locale loc = format . hasLocale ( ) ? format . getLocale ( ) : serializers . getLocale ( ) ; SimpleDateFormat df = new SimpleDateFormat ( pattern , loc ) ; if ( tz == null ) { tz = serializers . getTimeZone ( ) ; } df . setTimeZone ( tz ) ; return withFormat ( Boolean . FALSE , df ) ; } } } return this ; }
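A sketch of the behavior the fix restores: a pattern alone, with the shape left at Shape.ANY, is enough to force string output. The bean is hypothetical; the output assumes Jackson's default GMT time zone:

```java
import com.fasterxml.jackson.annotation.JsonFormat;
import com.fasterxml.jackson.databind.ObjectMapper;

public class PatternWithoutShapeDemo {
    static class Bean {
        @JsonFormat(pattern = "yyyy-MM-dd") // shape left at the default, Shape.ANY
        public java.util.Date d = new java.util.Date(0L);
    }

    public static void main(String[] args) throws Exception {
        System.out.println(new ObjectMapper().writeValueAsString(new Bean()));
        // {"d":"1970-01-01"} with the fix; before it, the pattern was ignored
        // and a numeric timestamp was written
    }
}
```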
Chart
1
source/org/jfree/chart/renderer/category/AbstractCategoryItemRenderer.java
1790
1822
Potential NPE in AbstractCategoryItemRender.getLegendItems()
Setting up a working copy of the current JFreeChart trunk in Eclipse I got a warning about a null pointer access in this bit of code from AbstractCategoryItemRender.java: public LegendItemCollection getLegendItems() { LegendItemCollection result = new LegendItemCollection(); if (this.plot == null) { return result; } int index = this.plot.getIndexOf(this); CategoryDataset dataset = this.plot.getDataset(index); if (dataset != null) { return result; } int seriesCount = dataset.getRowCount(); ... } The warning is in the last code line where seriesCount is assigned. The variable dataset is guaranteed to be null in this location, I suppose that the check before that should actually read "if (dataset == null)", not "if (dataset != null)". This is trunk as of 2010-02-08.
public LegendItemCollection getLegendItems() { LegendItemCollection result = new LegendItemCollection(); if (this.plot == null) { return result; } int index = this.plot.getIndexOf(this); CategoryDataset dataset = this.plot.getDataset(index); if (dataset != null) { return result; } int seriesCount = dataset.getRowCount(); if (plot.getRowRenderingOrder().equals(SortOrder.ASCENDING)) { for (int i = 0; i < seriesCount; i++) { if (isSeriesVisibleInLegend(i)) { LegendItem item = getLegendItem(index, i); if (item != null) { result.add(item); } } } } else { for (int i = seriesCount - 1; i >= 0; i--) { if (isSeriesVisibleInLegend(i)) { LegendItem item = getLegendItem(index, i); if (item != null) { result.add(item); } } } } return result; }
public LegendItemCollection getLegendItems ( ) { LegendItemCollection result = new LegendItemCollection ( ) ; if ( this . plot == null ) { return result ; } int index = this . plot . getIndexOf ( this ) ; CategoryDataset dataset = this . plot . getDataset ( index ) ; if ( dataset != null ) { return result ; } int seriesCount = dataset . getRowCount ( ) ; if ( plot . getRowRenderingOrder ( ) . equals ( SortOrder . ASCENDING ) ) { for ( int i = 0 ; i < seriesCount ; i ++ ) { if ( isSeriesVisibleInLegend ( i ) ) { LegendItem item = getLegendItem ( index , i ) ; if ( item != null ) { result . add ( item ) ; } } } } else { for ( int i = seriesCount - 1 ; i >= 0 ; i -- ) { if ( isSeriesVisibleInLegend ( i ) ) { LegendItem item = getLegendItem ( index , i ) ; if ( item != null ) { result . add ( item ) ; } } } } return result ; }
public LegendItemCollection getLegendItems() { LegendItemCollection result = new LegendItemCollection(); if (this.plot == null) { return result; } int index = this.plot.getIndexOf(this); CategoryDataset dataset = this.plot.getDataset(index); if (dataset == null) { return result; } int seriesCount = dataset.getRowCount(); if (plot.getRowRenderingOrder().equals(SortOrder.ASCENDING)) { for (int i = 0; i < seriesCount; i++) { if (isSeriesVisibleInLegend(i)) { LegendItem item = getLegendItem(index, i); if (item != null) { result.add(item); } } } } else { for (int i = seriesCount - 1; i >= 0; i--) { if (isSeriesVisibleInLegend(i)) { LegendItem item = getLegendItem(index, i); if (item != null) { result.add(item); } } } } return result; }
public LegendItemCollection getLegendItems ( ) { LegendItemCollection result = new LegendItemCollection ( ) ; if ( this . plot == null ) { return result ; } int index = this . plot . getIndexOf ( this ) ; CategoryDataset dataset = this . plot . getDataset ( index ) ; if ( dataset == null ) { return result ; } int seriesCount = dataset . getRowCount ( ) ; if ( plot . getRowRenderingOrder ( ) . equals ( SortOrder . ASCENDING ) ) { for ( int i = 0 ; i < seriesCount ; i ++ ) { if ( isSeriesVisibleInLegend ( i ) ) { LegendItem item = getLegendItem ( index , i ) ; if ( item != null ) { result . add ( item ) ; } } } } else { for ( int i = seriesCount - 1 ; i >= 0 ; i -- ) { if ( isSeriesVisibleInLegend ( i ) ) { LegendItem item = getLegendItem ( index , i ) ; if ( item != null ) { result . add ( item ) ; } } } } return result ; }
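The inverted guard makes the dereference dead code for every non-null dataset and a guaranteed NPE otherwise; a distilled sketch:

```java
public class InvertedGuardDemo {
    public static void main(String[] args) {
        Object dataset = new Object(); // stands in for a non-null CategoryDataset
        if (dataset != null) {         // buggy guard: bails out exactly when there IS data
            System.out.println("empty legend returned despite having a dataset");
            return;
        }
        dataset.hashCode();            // only reachable when dataset == null -> guaranteed NPE
    }
}
```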
JacksonDatabind
101
src/main/java/com/fasterxml/jackson/databind/deser/BeanDeserializer.java
735
836
`@JsonUnwrapped` fields are skipped when using `PropertyBasedCreator` if they appear after the last creator property
Example: ```java static class Bean { int x; int y; @JsonUnwrapped UnwrappedBean w; public Bean(@JsonProperty("x") int x, @JsonProperty("y") int y) { this.x = x; this.y = y; } public void setW(UnwrappedBean w) { this.w = w; } } static class UnwrappedBean { int a; int b; public UnwrappedBean(@JsonProperty("a") int a, @JsonProperty("b") int b) { this.a = a; this.b = b; } } ``` ```json {"x": 1, "a": 2, "y": 3, "b": 4} ``` `x`, `y`, and `a` are deserialized as expected. `b` is skipped entirely. I think I've found the root cause and the fix doesn't appear to break any tests; opening a PR for further review.
@SuppressWarnings("resource") protected Object deserializeUsingPropertyBasedWithUnwrapped(JsonParser p, DeserializationContext ctxt) throws IOException { // 01-Dec-2016, tatu: Note: This IS legal to call, but only when unwrapped // value itself is NOT passed via `CreatorProperty` (which isn't supported). // Ok however to pass via setter or field. final PropertyBasedCreator creator = _propertyBasedCreator; PropertyValueBuffer buffer = creator.startBuilding(p, ctxt, _objectIdReader); TokenBuffer tokens = new TokenBuffer(p, ctxt); tokens.writeStartObject(); JsonToken t = p.getCurrentToken(); for (; t == JsonToken.FIELD_NAME; t = p.nextToken()) { String propName = p.getCurrentName(); p.nextToken(); // to point to value // creator property? SettableBeanProperty creatorProp = creator.findCreatorProperty(propName); if (creatorProp != null) { // Last creator property to set? if (buffer.assignParameter(creatorProp, _deserializeWithErrorWrapping(p, ctxt, creatorProp))) { t = p.nextToken(); // to move to following FIELD_NAME/END_OBJECT Object bean; try { bean = creator.build(ctxt, buffer); } catch (Exception e) { bean = wrapInstantiationProblem(e, ctxt); } // [databind#631]: Assign current value, to be accessible by custom serializers p.setCurrentValue(bean); // if so, need to copy all remaining tokens into buffer while (t == JsonToken.FIELD_NAME) { // NOTE: do NOT skip name as it needs to be copied; `copyCurrentStructure` does that p.nextToken(); tokens.copyCurrentStructure(p); t = p.nextToken(); } // 28-Aug-2018, tatu: Let's add sanity check here, easier to catch off-by-some // problems if we maintain invariants tokens.writeEndObject(); if (bean.getClass() != _beanType.getRawClass()) { // !!! 08-Jul-2011, tatu: Could probably support; but for now // it's too complicated, so bail out ctxt.reportInputMismatch(creatorProp, "Cannot create polymorphic instances with unwrapped values"); return null; } return _unwrappedPropertyHandler.processUnwrapped(p, ctxt, bean, tokens); } continue; } // Object Id property? if (buffer.readIdProperty(propName)) { continue; } // regular property? needs buffering SettableBeanProperty prop = _beanProperties.find(propName); if (prop != null) { buffer.bufferProperty(prop, _deserializeWithErrorWrapping(p, ctxt, prop)); continue; } // Things marked as ignorable should not be passed to any setter if (_ignorableProps != null && _ignorableProps.contains(propName)) { handleIgnoredProperty(p, ctxt, handledType(), propName); continue; } // 29-Nov-2016, tatu: probably should try to avoid sending content // both to any setter AND buffer... but, for now, the only thing // we can do. // how about any setter? We'll get copies but... if (_anySetter == null) { // but... others should be passed to unwrapped property deserializers tokens.writeFieldName(propName); tokens.copyCurrentStructure(p); } else { // Need to copy to a separate buffer first TokenBuffer b2 = TokenBuffer.asCopyOfValue(p); tokens.writeFieldName(propName); tokens.append(b2); try { buffer.bufferAnyProperty(_anySetter, propName, _anySetter.deserialize(b2.asParserOnFirstToken(), ctxt)); } catch (Exception e) { wrapAndThrow(e, _beanType.getRawClass(), propName, ctxt); } continue; } } // We hit END_OBJECT, so: Object bean; try { bean = creator.build(ctxt, buffer); } catch (Exception e) { wrapInstantiationProblem(e, ctxt); return null; // never gets here } return _unwrappedPropertyHandler.processUnwrapped(p, ctxt, bean, tokens); }
@ SuppressWarnings ( "resource" ) protected Object deserializeUsingPropertyBasedWithUnwrapped ( JsonParser p , DeserializationContext ctxt ) throws IOException { final PropertyBasedCreator creator = _propertyBasedCreator ; PropertyValueBuffer buffer = creator . startBuilding ( p , ctxt , _objectIdReader ) ; TokenBuffer tokens = new TokenBuffer ( p , ctxt ) ; tokens . writeStartObject ( ) ; JsonToken t = p . getCurrentToken ( ) ; for ( ; t == JsonToken . FIELD_NAME ; t = p . nextToken ( ) ) { String propName = p . getCurrentName ( ) ; p . nextToken ( ) ; SettableBeanProperty creatorProp = creator . findCreatorProperty ( propName ) ; if ( creatorProp != null ) { if ( buffer . assignParameter ( creatorProp , _deserializeWithErrorWrapping ( p , ctxt , creatorProp ) ) ) { t = p . nextToken ( ) ; Object bean ; try { bean = creator . build ( ctxt , buffer ) ; } catch ( Exception e ) { bean = wrapInstantiationProblem ( e , ctxt ) ; } p . setCurrentValue ( bean ) ; while ( t == JsonToken . FIELD_NAME ) { p . nextToken ( ) ; tokens . copyCurrentStructure ( p ) ; t = p . nextToken ( ) ; } tokens . writeEndObject ( ) ; if ( bean . getClass ( ) != _beanType . getRawClass ( ) ) { ctxt . reportInputMismatch ( creatorProp , "Cannot create polymorphic instances with unwrapped values" ) ; return null ; } return _unwrappedPropertyHandler . processUnwrapped ( p , ctxt , bean , tokens ) ; } continue ; } if ( buffer . readIdProperty ( propName ) ) { continue ; } SettableBeanProperty prop = _beanProperties . find ( propName ) ; if ( prop != null ) { buffer . bufferProperty ( prop , _deserializeWithErrorWrapping ( p , ctxt , prop ) ) ; continue ; } if ( _ignorableProps != null && _ignorableProps . contains ( propName ) ) { handleIgnoredProperty ( p , ctxt , handledType ( ) , propName ) ; continue ; } if ( _anySetter == null ) { tokens . writeFieldName ( propName ) ; tokens . copyCurrentStructure ( p ) ; } else { TokenBuffer b2 = TokenBuffer . asCopyOfValue ( p ) ; tokens . writeFieldName ( propName ) ; tokens . append ( b2 ) ; try { buffer . bufferAnyProperty ( _anySetter , propName , _anySetter . deserialize ( b2 . asParserOnFirstToken ( ) , ctxt ) ) ; } catch ( Exception e ) { wrapAndThrow ( e , _beanType . getRawClass ( ) , propName , ctxt ) ; } continue ; } } Object bean ; try { bean = creator . build ( ctxt , buffer ) ; } catch ( Exception e ) { wrapInstantiationProblem ( e , ctxt ) ; return null ; } return _unwrappedPropertyHandler . processUnwrapped ( p , ctxt , bean , tokens ) ; }
@SuppressWarnings("resource") protected Object deserializeUsingPropertyBasedWithUnwrapped(JsonParser p, DeserializationContext ctxt) throws IOException { // 01-Dec-2016, tatu: Note: This IS legal to call, but only when unwrapped // value itself is NOT passed via `CreatorProperty` (which isn't supported). // Ok however to pass via setter or field. final PropertyBasedCreator creator = _propertyBasedCreator; PropertyValueBuffer buffer = creator.startBuilding(p, ctxt, _objectIdReader); TokenBuffer tokens = new TokenBuffer(p, ctxt); tokens.writeStartObject(); JsonToken t = p.getCurrentToken(); for (; t == JsonToken.FIELD_NAME; t = p.nextToken()) { String propName = p.getCurrentName(); p.nextToken(); // to point to value // creator property? SettableBeanProperty creatorProp = creator.findCreatorProperty(propName); if (creatorProp != null) { // Last creator property to set? if (buffer.assignParameter(creatorProp, _deserializeWithErrorWrapping(p, ctxt, creatorProp))) { t = p.nextToken(); // to move to following FIELD_NAME/END_OBJECT Object bean; try { bean = creator.build(ctxt, buffer); } catch (Exception e) { bean = wrapInstantiationProblem(e, ctxt); } // [databind#631]: Assign current value, to be accessible by custom serializers p.setCurrentValue(bean); // if so, need to copy all remaining tokens into buffer while (t == JsonToken.FIELD_NAME) { // NOTE: do NOT skip name as it needs to be copied; `copyCurrentStructure` does that tokens.copyCurrentStructure(p); t = p.nextToken(); } // 28-Aug-2018, tatu: Let's add sanity check here, easier to catch off-by-some // problems if we maintain invariants if (t != JsonToken.END_OBJECT) { ctxt.reportWrongTokenException(this, JsonToken.END_OBJECT, "Attempted to unwrap '%s' value", handledType().getName()); } tokens.writeEndObject(); if (bean.getClass() != _beanType.getRawClass()) { // !!! 08-Jul-2011, tatu: Could probably support; but for now // it's too complicated, so bail out ctxt.reportInputMismatch(creatorProp, "Cannot create polymorphic instances with unwrapped values"); return null; } return _unwrappedPropertyHandler.processUnwrapped(p, ctxt, bean, tokens); } continue; } // Object Id property? if (buffer.readIdProperty(propName)) { continue; } // regular property? needs buffering SettableBeanProperty prop = _beanProperties.find(propName); if (prop != null) { buffer.bufferProperty(prop, _deserializeWithErrorWrapping(p, ctxt, prop)); continue; } // Things marked as ignorable should not be passed to any setter if (_ignorableProps != null && _ignorableProps.contains(propName)) { handleIgnoredProperty(p, ctxt, handledType(), propName); continue; } // 29-Nov-2016, tatu: probably should try to avoid sending content // both to any setter AND buffer... but, for now, the only thing // we can do. // how about any setter? We'll get copies but... if (_anySetter == null) { // but... 
others should be passed to unwrapped property deserializers tokens.writeFieldName(propName); tokens.copyCurrentStructure(p); } else { // Need to copy to a separate buffer first TokenBuffer b2 = TokenBuffer.asCopyOfValue(p); tokens.writeFieldName(propName); tokens.append(b2); try { buffer.bufferAnyProperty(_anySetter, propName, _anySetter.deserialize(b2.asParserOnFirstToken(), ctxt)); } catch (Exception e) { wrapAndThrow(e, _beanType.getRawClass(), propName, ctxt); } continue; } } // We hit END_OBJECT, so: Object bean; try { bean = creator.build(ctxt, buffer); } catch (Exception e) { wrapInstantiationProblem(e, ctxt); return null; // never gets here } return _unwrappedPropertyHandler.processUnwrapped(p, ctxt, bean, tokens); }
@ SuppressWarnings ( "resource" ) protected Object deserializeUsingPropertyBasedWithUnwrapped ( JsonParser p , DeserializationContext ctxt ) throws IOException { final PropertyBasedCreator creator = _propertyBasedCreator ; PropertyValueBuffer buffer = creator . startBuilding ( p , ctxt , _objectIdReader ) ; TokenBuffer tokens = new TokenBuffer ( p , ctxt ) ; tokens . writeStartObject ( ) ; JsonToken t = p . getCurrentToken ( ) ; for ( ; t == JsonToken . FIELD_NAME ; t = p . nextToken ( ) ) { String propName = p . getCurrentName ( ) ; p . nextToken ( ) ; SettableBeanProperty creatorProp = creator . findCreatorProperty ( propName ) ; if ( creatorProp != null ) { if ( buffer . assignParameter ( creatorProp , _deserializeWithErrorWrapping ( p , ctxt , creatorProp ) ) ) { t = p . nextToken ( ) ; Object bean ; try { bean = creator . build ( ctxt , buffer ) ; } catch ( Exception e ) { bean = wrapInstantiationProblem ( e , ctxt ) ; } p . setCurrentValue ( bean ) ; while ( t == JsonToken . FIELD_NAME ) { tokens . copyCurrentStructure ( p ) ; t = p . nextToken ( ) ; } if ( t != JsonToken . END_OBJECT ) { ctxt . reportWrongTokenException ( this , JsonToken . END_OBJECT , "Attempted to unwrap '%s' value" , handledType ( ) . getName ( ) ) ; } tokens . writeEndObject ( ) ; if ( bean . getClass ( ) != _beanType . getRawClass ( ) ) { ctxt . reportInputMismatch ( creatorProp , "Cannot create polymorphic instances with unwrapped values" ) ; return null ; } return _unwrappedPropertyHandler . processUnwrapped ( p , ctxt , bean , tokens ) ; } continue ; } if ( buffer . readIdProperty ( propName ) ) { continue ; } SettableBeanProperty prop = _beanProperties . find ( propName ) ; if ( prop != null ) { buffer . bufferProperty ( prop , _deserializeWithErrorWrapping ( p , ctxt , prop ) ) ; continue ; } if ( _ignorableProps != null && _ignorableProps . contains ( propName ) ) { handleIgnoredProperty ( p , ctxt , handledType ( ) , propName ) ; continue ; } if ( _anySetter == null ) { tokens . writeFieldName ( propName ) ; tokens . copyCurrentStructure ( p ) ; } else { TokenBuffer b2 = TokenBuffer . asCopyOfValue ( p ) ; tokens . writeFieldName ( propName ) ; tokens . append ( b2 ) ; try { buffer . bufferAnyProperty ( _anySetter , propName , _anySetter . deserialize ( b2 . asParserOnFirstToken ( ) , ctxt ) ) ; } catch ( Exception e ) { wrapAndThrow ( e , _beanType . getRawClass ( ) , propName , ctxt ) ; } continue ; } } Object bean ; try { bean = creator . build ( ctxt , buffer ) ; } catch ( Exception e ) { wrapInstantiationProblem ( e , ctxt ) ; return null ; } return _unwrappedPropertyHandler . processUnwrapped ( p , ctxt , bean , tokens ) ; }
Math
20
src/main/java/org/apache/commons/math3/optimization/direct/CMAESOptimizer.java
920
923
CMAESOptimizer does not enforce bounds
The CMAESOptimizer can exceed the bounds passed to optimize. Looking at the generationLoop in doOptimize(), it does a bounds check by calling isFeasible() but if checkFeasableCount is zero (the default) then isFeasible() is never even called. Also, even with non-zero checkFeasableCount it may give up before finding an in-bounds offspring and go forward with an out-of-bounds offspring. This is against svn revision 1387637. I can provide an example program where the optimizer ends up with a fit outside the prescribed bounds if that would help.
public double[] repairAndDecode(final double[] x) { return decode(x); }
public double [ ] repairAndDecode ( final double [ ] x ) { return decode ( x ) ; }
public double[] repairAndDecode(final double[] x) { return boundaries != null && isRepairMode ? decode(repair(x)) : decode(x); }
public double [ ] repairAndDecode ( final double [ ] x ) { return boundaries != null && isRepairMode ? decode ( repair ( x ) ) : decode ( x ) ; }
JacksonDatabind
12
src/main/java/com/fasterxml/jackson/databind/deser/std/MapDeserializer.java
298
305
@JsonDeserialize on Map with contentUsing custom deserializer overwrites default behavior
I recently updated from version 2.3.3 to 2.5.1 and encountered a new issue with our custom deserializers. They either seemed to stop working or were active on the wrong fields. I could narrow it down to some change in version 2.4.4 (2.4.3 is still working for me). I wrote a test to show this behavior. It seems to appear when there are two maps with the same key and value types in a bean, and only one of them has a custom deserializer. The deserializer is then incorrectly used either for both or none of the maps. This test works for me in version 2.4.3 and fails with higher versions. ``` java import static org.junit.Assert.assertEquals; import java.io.IOException; import java.util.Map; import org.junit.Test; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.DeserializationContext; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.annotation.JsonDeserialize; import com.fasterxml.jackson.databind.deser.std.StdDeserializer; public class DeserializeTest { @Test public void testIt() throws Exception { ObjectMapper om = new ObjectMapper(); String json = "{\"map1\":{\"a\":1},\"map2\":{\"a\":1}}"; TestBean bean = om.readValue(json.getBytes(), TestBean.class); assertEquals(100, bean.getMap1().get("a").intValue()); assertEquals(1, bean.getMap2().get("a").intValue()); } public static class TestBean { @JsonProperty("map1") @JsonDeserialize(contentUsing = CustomDeserializer.class) Map<String, Integer> map1; @JsonProperty("map2") Map<String, Integer> map2; public Map<String, Integer> getMap1() { return map1; } public void setMap1(Map<String, Integer> map1) { this.map1 = map1; } public Map<String, Integer> getMap2() { return map2; } public void setMap2(Map<String, Integer> map2) { this.map2 = map2; } } public static class CustomDeserializer extends StdDeserializer<Integer> { public CustomDeserializer() { super(Integer.class); } @Override public Integer deserialize(JsonParser p, DeserializationContext ctxt) throws IOException, JsonProcessingException { Integer value = p.readValueAs(Integer.class); return value * 100; } } } ```
@Override public boolean isCachable() { /* As per [databind#735], existence of value or key deserializer (only passed * if annotated to use non-standard one) should also prevent caching. */ return (_valueTypeDeserializer == null) && (_ignorableProperties == null); }
@ Override public boolean isCachable ( ) { return ( _valueTypeDeserializer == null ) && ( _ignorableProperties == null ) ; }
@Override public boolean isCachable() { /* As per [databind#735], existence of value or key deserializer (only passed * if annotated to use non-standard one) should also prevent caching. */ return (_valueDeserializer == null) && (_keyDeserializer == null) && (_valueTypeDeserializer == null) && (_ignorableProperties == null); }
@ Override public boolean isCachable ( ) { return ( _valueDeserializer == null ) && ( _keyDeserializer == null ) && ( _valueTypeDeserializer == null ) && ( _ignorableProperties == null ) ; }
Compress
39
src/main/java/org/apache/commons/compress/utils/ArchiveUtils.java
272
288
Defective .zip-archive produces problematic error message
A truncated .zip file produces a java.io.EOFException containing a huge amount of byte[] data in the error message - leading to beeps and a crippling workload in a potential console logger.
public static String sanitize(String s) { final char[] chars = s.toCharArray(); final int len = chars.length; final StringBuilder sb = new StringBuilder(); for (int i = 0; i < len; i++) { final char c = chars[i]; if (!Character.isISOControl(c)) { Character.UnicodeBlock block = Character.UnicodeBlock.of(c); if (block != null && block != Character.UnicodeBlock.SPECIALS) { sb.append(c); continue; } } sb.append('?'); } return sb.toString(); }
public static String sanitize ( String s ) { final char [ ] chars = s . toCharArray ( ) ; final int len = chars . length ; final StringBuilder sb = new StringBuilder ( ) ; for ( int i = 0 ; i < len ; i ++ ) { final char c = chars [ i ] ; if ( ! Character . isISOControl ( c ) ) { Character . UnicodeBlock block = Character . UnicodeBlock . of ( c ) ; if ( block != null && block != Character . UnicodeBlock . SPECIALS ) { sb . append ( c ) ; continue ; } } sb . append ( '?' ) ; } return sb . toString ( ) ; }
public static String sanitize(String s) { final char[] cs = s.toCharArray(); final char[] chars = cs.length <= MAX_SANITIZED_NAME_LENGTH ? cs : Arrays.copyOf(cs, MAX_SANITIZED_NAME_LENGTH); if (cs.length > MAX_SANITIZED_NAME_LENGTH) { for (int i = MAX_SANITIZED_NAME_LENGTH - 3; i < MAX_SANITIZED_NAME_LENGTH; i++) { chars[i] = '.'; } } final int len = chars.length; final StringBuilder sb = new StringBuilder(); for (int i = 0; i < len; i++) { final char c = chars[i]; if (!Character.isISOControl(c)) { Character.UnicodeBlock block = Character.UnicodeBlock.of(c); if (block != null && block != Character.UnicodeBlock.SPECIALS) { sb.append(c); continue; } } sb.append('?'); } return sb.toString(); }
public static String sanitize ( String s ) { final char [ ] cs = s . toCharArray ( ) ; final char [ ] chars = cs . length <= MAX_SANITIZED_NAME_LENGTH ? cs : Arrays . copyOf ( cs , MAX_SANITIZED_NAME_LENGTH ) ; if ( cs . length > MAX_SANITIZED_NAME_LENGTH ) { for ( int i = MAX_SANITIZED_NAME_LENGTH - 3 ; i < MAX_SANITIZED_NAME_LENGTH ; i ++ ) { chars [ i ] = '.' ; } } final int len = chars . length ; final StringBuilder sb = new StringBuilder ( ) ; for ( int i = 0 ; i < len ; i ++ ) { final char c = chars [ i ] ; if ( ! Character . isISOControl ( c ) ) { Character . UnicodeBlock block = Character . UnicodeBlock . of ( c ) ; if ( block != null && block != Character . UnicodeBlock . SPECIALS ) { sb . append ( c ) ; continue ; } } sb . append ( '?' ) ; } return sb . toString ( ) ; }
Cli
37
src/main/java/org/apache/commons/cli/DefaultParser.java
299
305
Optional argument picking up next regular option as its argument
None
private boolean isShortOption(String token) { // short options (-S, -SV, -S=V, -SV1=V2, -S1S2) return token.startsWith("-") && token.length() >= 2 && options.hasShortOption(token.substring(1, 2)); // remove leading "-" and "=value" }
private boolean isShortOption ( String token ) { return token . startsWith ( "-" ) && token . length ( ) >= 2 && options . hasShortOption ( token . substring ( 1 , 2 ) ) ; }
private boolean isShortOption(String token) { // short options (-S, -SV, -S=V, -SV1=V2, -S1S2) if (!token.startsWith("-") || token.length() == 1) { return false; } // remove leading "-" and "=value" int pos = token.indexOf("="); String optName = pos == -1 ? token.substring(1) : token.substring(1, pos); return options.hasShortOption(optName); }
private boolean isShortOption ( String token ) { if ( ! token . startsWith ( "-" ) || token . length ( ) == 1 ) { return false ; } int pos = token . indexOf ( "=" ) ; String optName = pos == - 1 ? token . substring ( 1 ) : token . substring ( 1 , pos ) ; return options . hasShortOption ( optName ) ; }
JacksonCore
25
src/main/java/com/fasterxml/jackson/core/json/ReaderBasedJsonParser.java
1948
1990
Fix ArrayIndexOutOfBoundsException found by LGTM.com
Seen on LGTM.com [here](https://lgtm.com/projects/g/FasterXML/jackson-core/alerts/?mode=tree) Since `codes.length == maxCode`, if `i == maxCode` an `ArrayIndexOutOfBoundsException` is thrown. This happens when `ALLOW_UNQUOTED_FIELD_NAMES` is enabled and character `256` is found as part of a field name after needing to consume more data from the reader. A gist containing code to trigger this path can be found [here](https://gist.github.com/aeyerstaylor/90128cca75e69303254a0d5a5dbe6762). I could not find any tests for this class, but if there is a place to add tests I can add the example as a test. _(Full disclosure: I'm part of the company behind LGTM.com)_
private String _handleOddName2(int startPtr, int hash, int[] codes) throws IOException { _textBuffer.resetWithShared(_inputBuffer, startPtr, (_inputPtr - startPtr)); char[] outBuf = _textBuffer.getCurrentSegment(); int outPtr = _textBuffer.getCurrentSegmentSize(); final int maxCode = codes.length; while (true) { if (_inputPtr >= _inputEnd) { if (!_loadMore()) { // acceptable for now (will error out later) break; } } char c = _inputBuffer[_inputPtr]; int i = (int) c; if (i <= maxCode) { if (codes[i] != 0) { break; } } else if (!Character.isJavaIdentifierPart(c)) { break; } ++_inputPtr; hash = (hash * CharsToNameCanonicalizer.HASH_MULT) + i; // Ok, let's add char to output: outBuf[outPtr++] = c; // Need more room? if (outPtr >= outBuf.length) { outBuf = _textBuffer.finishCurrentSegment(); outPtr = 0; } } _textBuffer.setCurrentLength(outPtr); { TextBuffer tb = _textBuffer; char[] buf = tb.getTextBuffer(); int start = tb.getTextOffset(); int len = tb.size(); return _symbols.findSymbol(buf, start, len, hash); } }
private String _handleOddName2 ( int startPtr , int hash , int [ ] codes ) throws IOException { _textBuffer . resetWithShared ( _inputBuffer , startPtr , ( _inputPtr - startPtr ) ) ; char [ ] outBuf = _textBuffer . getCurrentSegment ( ) ; int outPtr = _textBuffer . getCurrentSegmentSize ( ) ; final int maxCode = codes . length ; while ( true ) { if ( _inputPtr >= _inputEnd ) { if ( ! _loadMore ( ) ) { break ; } } char c = _inputBuffer [ _inputPtr ] ; int i = ( int ) c ; if ( i <= maxCode ) { if ( codes [ i ] != 0 ) { break ; } } else if ( ! Character . isJavaIdentifierPart ( c ) ) { break ; } ++ _inputPtr ; hash = ( hash * CharsToNameCanonicalizer . HASH_MULT ) + i ; outBuf [ outPtr ++ ] = c ; if ( outPtr >= outBuf . length ) { outBuf = _textBuffer . finishCurrentSegment ( ) ; outPtr = 0 ; } } _textBuffer . setCurrentLength ( outPtr ) ; { TextBuffer tb = _textBuffer ; char [ ] buf = tb . getTextBuffer ( ) ; int start = tb . getTextOffset ( ) ; int len = tb . size ( ) ; return _symbols . findSymbol ( buf , start , len , hash ) ; } }
private String _handleOddName2(int startPtr, int hash, int[] codes) throws IOException { _textBuffer.resetWithShared(_inputBuffer, startPtr, (_inputPtr - startPtr)); char[] outBuf = _textBuffer.getCurrentSegment(); int outPtr = _textBuffer.getCurrentSegmentSize(); final int maxCode = codes.length; while (true) { if (_inputPtr >= _inputEnd) { if (!_loadMore()) { // acceptable for now (will error out later) break; } } char c = _inputBuffer[_inputPtr]; int i = (int) c; if (i < maxCode) { if (codes[i] != 0) { break; } } else if (!Character.isJavaIdentifierPart(c)) { break; } ++_inputPtr; hash = (hash * CharsToNameCanonicalizer.HASH_MULT) + i; // Ok, let's add char to output: outBuf[outPtr++] = c; // Need more room? if (outPtr >= outBuf.length) { outBuf = _textBuffer.finishCurrentSegment(); outPtr = 0; } } _textBuffer.setCurrentLength(outPtr); { TextBuffer tb = _textBuffer; char[] buf = tb.getTextBuffer(); int start = tb.getTextOffset(); int len = tb.size(); return _symbols.findSymbol(buf, start, len, hash); } }
private String _handleOddName2 ( int startPtr , int hash , int [ ] codes ) throws IOException { _textBuffer . resetWithShared ( _inputBuffer , startPtr , ( _inputPtr - startPtr ) ) ; char [ ] outBuf = _textBuffer . getCurrentSegment ( ) ; int outPtr = _textBuffer . getCurrentSegmentSize ( ) ; final int maxCode = codes . length ; while ( true ) { if ( _inputPtr >= _inputEnd ) { if ( ! _loadMore ( ) ) { break ; } } char c = _inputBuffer [ _inputPtr ] ; int i = ( int ) c ; if ( i < maxCode ) { if ( codes [ i ] != 0 ) { break ; } } else if ( ! Character . isJavaIdentifierPart ( c ) ) { break ; } ++ _inputPtr ; hash = ( hash * CharsToNameCanonicalizer . HASH_MULT ) + i ; outBuf [ outPtr ++ ] = c ; if ( outPtr >= outBuf . length ) { outBuf = _textBuffer . finishCurrentSegment ( ) ; outPtr = 0 ; } } _textBuffer . setCurrentLength ( outPtr ) ; { TextBuffer tb = _textBuffer ; char [ ] buf = tb . getTextBuffer ( ) ; int start = tb . getTextOffset ( ) ; int len = tb . size ( ) ; return _symbols . findSymbol ( buf , start , len , hash ) ; } }
Math
61
src/main/java/org/apache/commons/math/distribution/PoissonDistributionImpl.java
92
100
Dangerous code in "PoissonDistributionImpl"
In the following excerpt from class "PoissonDistributionImpl": {code:title=PoissonDistributionImpl.java|borderStyle=solid} public PoissonDistributionImpl(double p, NormalDistribution z) { super(); setNormal(z); setMean(p); } {code} (1) Overridable methods are called within the constructor. (2) The reference "z" is stored and modified within the class. I've encountered problem (1) in several classes while working on issue 348. In those cases, in order to remove potential problems, I copied/pasted the body of the "setter" methods inside the constructor but I think that a more elegant solution would be to remove the "setters" altogether (i.e. make the classes immutable). Problem (2) can also create unexpected behaviour. Is it really necessary to pass the "NormalDistribution" object; can't it always be created within the class?
public PoissonDistributionImpl(double p, double epsilon, int maxIterations) { if (p <= 0) { throw MathRuntimeException.createIllegalArgumentException(LocalizedFormats.NOT_POSITIVE_POISSON_MEAN, p); } mean = p; normal = new NormalDistributionImpl(p, FastMath.sqrt(p)); this.epsilon = epsilon; this.maxIterations = maxIterations; }
public PoissonDistributionImpl ( double p , double epsilon , int maxIterations ) { if ( p <= 0 ) { throw MathRuntimeException . createIllegalArgumentException ( LocalizedFormats . NOT_POSITIVE_POISSON_MEAN , p ) ; } mean = p ; normal = new NormalDistributionImpl ( p , FastMath . sqrt ( p ) ) ; this . epsilon = epsilon ; this . maxIterations = maxIterations ; }
public PoissonDistributionImpl(double p, double epsilon, int maxIterations) { if (p <= 0) { throw new NotStrictlyPositiveException(LocalizedFormats.MEAN, p); } mean = p; normal = new NormalDistributionImpl(p, FastMath.sqrt(p)); this.epsilon = epsilon; this.maxIterations = maxIterations; }
public PoissonDistributionImpl ( double p , double epsilon , int maxIterations ) { if ( p <= 0 ) { throw new NotStrictlyPositiveException ( LocalizedFormats . MEAN , p ) ; } mean = p ; normal = new NormalDistributionImpl ( p , FastMath . sqrt ( p ) ) ; this . epsilon = epsilon ; this . maxIterations = maxIterations ; }
JxPath
10
src/java/org/apache/commons/jxpath/ri/compiler/CoreOperationRelationalExpression.java
41
44
Binary operators behaviour involving node-sets is incorrect
According to the XPath specification: "If both objects to be compared are node-sets, then the comparison will be true if and only if there is a node in the first node-set and a node in the second node-set such that the result of performing the comparison on the string-values of the two nodes is true. If one object to be compared is a node-set and the other is a number, then the comparison will be true if and only if there is a node in the node-set such that the result of performing the comparison on the number to be compared and on the result of converting the string-value of that node to a number using the number function is true." But the following example illustrates that this is not JXPath's behaviour: JXPathContext pathContext = JXPathContext .newContext(DocumentBuilderFactory.newInstance() .newDocumentBuilder().parse( new InputSource(new StringReader( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n" + "<doc/>")))); Boolean result = (Boolean) pathContext.getValue("2.0 > child1", Boolean.class); assertFalse(result.booleanValue()); "child1" is not found - the right operand node-set is empty, but the result is TRUE instead of FALSE. Please check the greaterThan(), lesserThan(), etc. methods of org.apache.xpath.objects.XObject for a possible solution :)
public final Object computeValue(EvalContext context) { return compute(args[0].computeValue(context), args[1].computeValue(context)) ? Boolean.TRUE : Boolean.FALSE; }
public final Object computeValue ( EvalContext context ) { return compute ( args [ 0 ] . computeValue ( context ) , args [ 1 ] . computeValue ( context ) ) ? Boolean . TRUE : Boolean . FALSE ; }
public final Object computeValue(EvalContext context) { return compute(args[0].compute(context), args[1].compute(context)) ? Boolean.TRUE : Boolean.FALSE; }
public final Object computeValue ( EvalContext context ) { return compute ( args [ 0 ] . compute ( context ) , args [ 1 ] . compute ( context ) ) ? Boolean . TRUE : Boolean . FALSE ; }