signature | implementation
---|---
public class FormulaWriter { /** * Writes a given formula to a file with a given formula formatter.
* @param fileName the file name of the file
* @param formula the formula to write
* @param splitAndMultiline indicates whether - if the formula is a conjunction - the single operands should be
* written to different lines without a conjoining operator
* @param formatter the formatter for the formula
* @throws IOException if there was a problem writing the file */
public static void write(final String fileName, final Formula formula, final boolean splitAndMultiline, final FormulaStringRepresentation formatter) throws IOException { } } | write(new File(fileName), formula, splitAndMultiline, formatter); |
public class JainSipUtils { /** * RFC 1918 address spaces */
public static int getAddressOutboundness(String address) { } } | if (address.startsWith("127.0")) return 0; if (address.startsWith("192.168")) return 1; if (address.startsWith("10.")) return 2; if (address.startsWith("172.16") || address.startsWith("172.17") || address.startsWith("172.18") || address.startsWith("172.19") || address.startsWith("172.20") || address.startsWith("172.21") || address.startsWith("172.22") || address.startsWith("172.23") || address.startsWith("172.24") || address.startsWith("172.25") || address.startsWith("172.26") || address.startsWith("172.27") || address.startsWith("172.28") || address.startsWith("172.29") || address.startsWith("172.30") || address.startsWith("172.31")) return 3; if (address.indexOf(".") > 0) return 4; // match IPv4 addresses heuristically
return -1; // matches IPv6 or something malformed |
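A minimal usage sketch (the picker class below is hypothetical, not part of the row above): higher scores mean more publicly routable, so taking the candidate with the maximum score selects the best outbound address.

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class OutboundAddressPicker {
    // Returns the most publicly routable candidate, or null for an empty list.
    public static String pickBest(List<String> candidates) {
        return candidates.stream()
                .max(Comparator.comparingInt(JainSipUtils::getAddressOutboundness))
                .orElse(null);
    }

    public static void main(String[] args) {
        List<String> addrs = Arrays.asList("127.0.0.1", "192.168.1.10", "203.0.113.5");
        System.out.println(pickBest(addrs)); // prints 203.0.113.5 (score 4 beats 1 and 0)
    }
}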
public class SynchronizeFXTomcatServlet { /** * Creates a new {@link SynchronizeFxServer} that synchronizes its own model.
* Each {@link SynchronizeFxServer} managed by this servlet must have its own channel name.
* @param root The root object of the model that should be synchronized.
* @param channelName The name of the channel at which clients can connect to the new server.
* @param callback Used to inform the user of this class on errors. The methods in the callback are not called
* before you call {@link SynchronizeFxServer#start()}
* @throws IllegalArgumentException When a {@link SynchronizeFxServer} was already started with the given channel
* name and has not yet been shut down.
* @return The created server
* @see SynchronizeFxServer#SynchronizeFxServer(Object,
* de.saxsys.synchronizefx.core.clientserver.CommandTransferServer, ServerCallback) */
public SynchronizeFxServer newChannel(final Object root, final String channelName, final ServerCallback callback) { } } | synchronized (channels) { if (channels.containsKey(channelName)) { throw new IllegalArgumentException("A new SynchronizeFX channel with the name \"" + channelName + "\" should be created but a channel with this name already exists."); } final SynchronizeFXTomcatChannel channel = new SynchronizeFXTomcatChannel(this, newSerializer()); final SynchronizeFxServer server = new SynchronizeFxServer(root, channel, callback); channels.put(channelName, channel); servers.put(server, channel); return server; } |
public class BeamSearch { /** * Returns the best sequence of outcomes based on the model for this object.
* @param sequence The input sequence.
* @param additionalContext An Object[] of additional context. This is passed to the context generator blindly with the assumption that the context is appropriate.
* @return The top ranked sequence of outcomes. */
public Sequence bestSequence(Object[] sequence, Object[] additionalContext) { } } | return bestSequences(1, sequence, additionalContext, zeroLog)[0]; |
public class CmsSetupUI { /** * Shows the given step.
* @param step the step */
protected void showStep(A_CmsSetupStep step) { } } | Window window = newWindow(); window.setContent(step); window.setCaption(step.getTitle()); A_CmsUI.get().addWindow(window); window.center(); |
public class CommentGenerator { /** * Adds a new element to the front of the parent's child elements. */
public void addToFirstChildren(XmlElement parent, Element child) { } } | List<Element> elements = parent.getElements(); elements.add(0, child); |
public class CmsDomUtil { /** * Returns the computed style of the given element as a floating point number.<p>
* @param element the element
* @param style the CSS property
* @return the currently computed style */
public static double getCurrentStyleFloat(Element element, Style style) { } } | String currentStyle = getCurrentStyle(element, style); return CmsClientStringUtil.parseFloat(currentStyle); |
public class ShrinkWrapPath { /** * Relativizes the paths recursively.
* @param thisOriginal
* @param thisCurrent
* @param otherOriginal
* @param otherCurrent
* @param backupCount
* @return */
private static ShrinkWrapPath relativizeCommonRoot(final ShrinkWrapPath thisOriginal, final Path thisCurrent, final Path otherOriginal, Path otherCurrent, final int backupCount) { } } | // Preconditions
assert thisOriginal != null; assert thisCurrent != null; assert otherOriginal != null; assert otherCurrent != null; assert backupCount >= 0; // Do we yet have a common root?
if (!otherCurrent.startsWith(thisCurrent)) { // Back up until we do
final Path otherParent = otherCurrent.getParent(); final ShrinkWrapPath thisParent = (ShrinkWrapPath) thisCurrent.getParent(); if (otherParent != null && thisParent != null) { return relativizeCommonRoot(thisOriginal, thisParent, otherOriginal, otherParent, backupCount + 1); } else { throw new IllegalArgumentException("No common components"); } } // Common root. Now relativize that.
final List<String> thisTokens = tokenize(thisOriginal); final List<String> otherTokens = tokenize((ShrinkWrapPath) otherOriginal); final int numOtherTokens = otherTokens.size(); final int numToTake = otherTokens.size() - thisTokens.size(); final StringBuilder sb = new StringBuilder(); for (int i = 0; i < backupCount; i++) { sb.append(DIR_BACK); sb.append(ArchivePath.SEPARATOR); } final int startCounter = numOtherTokens - numToTake - backupCount; final int stopCounter = numOtherTokens - 1; if (log.isLoggable(Level.FINEST)) { log.finest("Backup: " + backupCount); log.finest("This tokens: " + thisTokens); log.finest("Other tokens: " + otherTokens); log.finest("Differential: " + numToTake); log.finest("Start: " + startCounter); log.finest("Stop: " + stopCounter); } for (int i = startCounter; i <= stopCounter; i++) { if (i > startCounter) { sb.append(ArchivePath.SEPARATOR); } sb.append(otherTokens.get(i)); } return new ShrinkWrapPath(sb.toString(), thisOriginal.fileSystem); |
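The recursion mirrors the contract of java.nio.file.Path#relativize: back up from this path to the common root (emitting one ".." per backup), then descend along the remaining tokens of the other path. A quick sanity check of the expected output, using the default filesystem rather than ShrinkWrap just to illustrate the contract:

import java.nio.file.Path;
import java.nio.file.Paths;

public class RelativizeDemo {
    public static void main(String[] args) {
        Path thisPath = Paths.get("/archive/a/b");
        Path otherPath = Paths.get("/archive/a/c/d");
        // One backup from "b" to the common root "/archive/a", then descend into "c/d".
        System.out.println(thisPath.relativize(otherPath)); // prints ../c/d
    }
}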
public class LabelSetterFactory { /** * Creates an accessor for setting a field's label information.
* @param beanClass the class in which the field is defined
* @param fieldName the name of the field
* @return an empty Optional if no setter for the label information exists.
* @throws IllegalArgumentException {@literal beanClass == null or fieldName == null}
* @throws IllegalArgumentException {@literal fieldName.isEmpty() == true} */
public Optional<LabelSetter> create(final Class<?> beanClass, final String fieldName) { } } | ArgUtils.notNull(beanClass, "beanClass"); ArgUtils.notEmpty(fieldName, "fieldName"); // case: a Map field named "labels"
Optional<LabelSetter> labelSetter = createMapField(beanClass, fieldName); if (labelSetter.isPresent()) { return labelSetter; } // case: a setter method
labelSetter = createMethod(beanClass, fieldName); if (labelSetter.isPresent()) { return labelSetter; } // case: a field with a "label" suffix
labelSetter = createField(beanClass, fieldName); if (labelSetter.isPresent()) { return labelSetter; } return Optional.empty(); |
public class Days { /** * Obtains a {@code Days} from a text string such as {@code PnD}.
* This will parse the string produced by {@code toString()} which is
* based on the ISO-8601 period formats {@code PnD} and {@code PnW}.
* The string starts with an optional sign, denoted by the ASCII negative
* or positive symbol. If negative, the whole amount is negated.
* The ASCII letter "P" is next in upper or lower case.
* There are then two sections, each consisting of a number and a suffix.
* At least one of the two sections must be present.
* The sections have suffixes in ASCII of "W" and "D" for weeks and days,
* accepted in upper or lower case. The suffixes must occur in order.
* The number part of each section must consist of ASCII digits.
* The number may be prefixed by the ASCII negative or positive symbol.
* The number must parse to an {@code int}.
* The leading plus/minus sign, and negative values for weeks and days, are
* not part of the ISO-8601 standard.
* For example, the following are valid inputs:
* <pre>
* "P2D"    -- Days.of(2)
* "P-2D"   -- Days.of(-2)
* "-P2D"   -- Days.of(-2)
* "-P-2D"  -- Days.of(2)
* "P3W"    -- Days.of(3 * 7)
* "P3W-2D" -- Days.of(3 * 7 - 2)
* </pre>
* @param text the text to parse, not null
* @return the parsed period, not null
* @throws DateTimeParseException if the text cannot be parsed to a period */
@FromString public static Days parse(CharSequence text) { } } | Objects.requireNonNull(text, "text"); Matcher matcher = PATTERN.matcher(text); if (matcher.matches()) { int negate = "-".equals(matcher.group(1)) ? -1 : 1; String weeksStr = matcher.group(2); String daysStr = matcher.group(3); if (weeksStr != null || daysStr != null) { int days = 0; if (daysStr != null) { try { days = Integer.parseInt(daysStr); } catch (NumberFormatException ex) { throw new DateTimeParseException("Text cannot be parsed to a Days, non-numeric days", text, 0, ex); } } if (weeksStr != null) { try { int weeks = Math.multiplyExact(Integer.parseInt(weeksStr), DAYS_PER_WEEK); days = Math.addExact(days, weeks); } catch (NumberFormatException ex) { throw new DateTimeParseException("Text cannot be parsed to a Days, non-numeric weeks", text, 0, ex); } } return of(Math.multiplyExact(days, negate)); } } throw new DateTimeParseException("Text cannot be parsed to a Days", text, 0); |
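A short usage sketch, assuming this row comes from the ThreeTen-Extra org.threeten.extra.Days class (the format examples are taken from the Javadoc above):

import org.threeten.extra.Days;

public class DaysParseDemo {
    public static void main(String[] args) {
        System.out.println(Days.parse("P2D").getAmount());    // 2
        System.out.println(Days.parse("P3W-2D").getAmount()); // 19 (3 * 7 - 2)
        System.out.println(Days.parse("-P2D").getAmount());   // -2 (leading sign negates)
    }
}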
public class RequestFromVertx { /** * Gets all the parameters from the request.
* @return The parameters */
@Override public Map<String, List<String>> parameters() { } } | Map<String, List<String>> result = new HashMap<>(); for (String key : request.params().names()) { result.put(key, request.params().getAll(key)); } return result; |
public class SpatialDbsImportUtils { /** * Create a spatial table using a schema.
* @param db the database to use.
* @param schema the schema to use.
* @param newTableName the new name of the table. If null, the shp name is used.
* @param avoidSpatialIndex if <code>true</code>, no spatial index will be created. This is useful if many records
* have to be inserted and the index will be created later manually.
* @return the name of the created table.
* @throws Exception */
public static String createTableFromSchema(ASpatialDb db, SimpleFeatureType schema, String newTableName, boolean avoidSpatialIndex) throws Exception { } } | GeometryDescriptor geometryDescriptor = schema.getGeometryDescriptor(); ADatabaseSyntaxHelper dsh = db.getType().getDatabaseSyntaxHelper(); List<String> attrSql = new ArrayList<String>(); List<AttributeDescriptor> attributeDescriptors = schema.getAttributeDescriptors(); for (AttributeDescriptor attributeDescriptor : attributeDescriptors) { String attrName = attributeDescriptor.getLocalName(); if (attributeDescriptor instanceof GeometryDescriptor) { continue; } else if (attrName.equalsIgnoreCase(ASpatialDb.PK_UID)) { continue; } Class<?> binding = attributeDescriptor.getType().getBinding(); if (binding.isAssignableFrom(Double.class) || binding.isAssignableFrom(Float.class)) { attrSql.add(attrName + " " + dsh.REAL()); } else if (binding.isAssignableFrom(Long.class) || binding.isAssignableFrom(Integer.class)) { attrSql.add(attrName + " " + dsh.INTEGER()); } else if (binding.isAssignableFrom(String.class)) { attrSql.add(attrName + " " + dsh.TEXT()); } else { attrSql.add(attrName + " " + dsh.TEXT()); } } String typeString = null; org.opengis.feature.type.GeometryType type = geometryDescriptor.getType(); Class<?> binding = type.getBinding(); if (binding.isAssignableFrom(MultiPolygon.class)) { typeString = "MULTIPOLYGON"; } else if (binding.isAssignableFrom(Polygon.class)) { typeString = "POLYGON"; } else if (binding.isAssignableFrom(MultiLineString.class)) { typeString = "MULTILINESTRING"; } else if (binding.isAssignableFrom(LineString.class)) { typeString = "LINESTRING"; } else if (binding.isAssignableFrom(MultiPoint.class)) { typeString = "MULTIPOINT"; } else if (binding.isAssignableFrom(Point.class)) { typeString = "POINT"; } if (typeString != null) { String codeFromCrs = CrsUtilities.getCodeFromCrs(schema.getCoordinateReferenceSystem()); if (codeFromCrs == null || codeFromCrs.toLowerCase().contains("null")) { codeFromCrs = "4326"; // fallback on 4326
} codeFromCrs = codeFromCrs.replaceFirst("EPSG:", ""); if (db instanceof SpatialiteDb) { SpatialiteDb spatialiteDb = (SpatialiteDb) db; spatialiteDb.createTable(newTableName, attrSql.toArray(new String[0])); spatialiteDb.addGeometryXYColumnAndIndex(newTableName, GEOMFIELD_FOR_SHAPEFILE, typeString, codeFromCrs, avoidSpatialIndex); } else if (db instanceof PostgisDb) { PostgisDb postgisDb = (PostgisDb) db; postgisDb.createTable(newTableName, attrSql.toArray(new String[0])); postgisDb.addGeometryXYColumnAndIndex(newTableName, GEOMFIELD_FOR_SHAPEFILE, typeString, codeFromCrs, avoidSpatialIndex); } else if (db instanceof H2GisDb) { H2GisDb h2gisDb = (H2GisDb) db; String typeStringExtra = typeString; // String typeStringExtra = "GEOMETRY(" + typeString + "," + codeFromCrs + ")";
attrSql.add(GEOMFIELD_FOR_SHAPEFILE + " " + typeStringExtra); String[] array = attrSql.toArray(new String[0]); h2gisDb.createTable(newTableName, array); h2gisDb.addSrid(newTableName, codeFromCrs, GEOMFIELD_FOR_SHAPEFILE); if (!avoidSpatialIndex) h2gisDb.createSpatialIndex(newTableName, GEOMFIELD_FOR_SHAPEFILE); } } else { db.createTable(newTableName, attrSql.toArray(new String[0])); } return newTableName; |
public class PForDelta { /** * Decompress b-bit slots.
* @param outDecompSlots decompressed block which is the output
* @param inCompBlock the compressed block which is the input
* @param blockSize the block size
* @param bits the value of the parameter b
* @return the compressed size in bits of the data that has been decompressed */
public static int decompressBBitSlots(int[] outDecompSlots, int[] inCompBlock, int blockSize, int bits) { } } | int compressedBitSize = 0; int offset = HEADER_SIZE; for (int i = 0; i < blockSize; i++) { outDecompSlots[i] = readBits(inCompBlock, offset, bits); offset += bits; } compressedBitSize = bits * blockSize; return compressedBitSize; |
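The row does not show readBits; the sketch below is a plausible reading of what such a routine does, under the assumption that values are packed most-significant-bit first across 32-bit words (the real PForDelta codec layout may differ):

public class BitReader {
    // Extract `bits` bits starting at absolute bit offset `offset` from a packed int[].
    static int readBits(int[] block, int offset, int bits) {
        if (bits == 0) return 0;
        int word = offset >>> 5; // offset / 32
        int bit = offset & 31;   // offset % 32
        // Load the current word (high half) and the next word (low half) into 64 bits
        // so a field spanning a word boundary can be read in one shot.
        long buf = ((long) block[word]) << 32;
        if (word + 1 < block.length) {
            buf |= block[word + 1] & 0xFFFFFFFFL;
        }
        // Shift the wanted field down to the low end and mask off `bits` bits.
        return (int) ((buf >>> (64 - bit - bits)) & ((1L << bits) - 1));
    }
}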
public class RecurlyClient { /** * Deletes a specific redemption.
* @param accountCode recurly account id
* @param redemptionUuid recurly coupon redemption uuid */
public void deleteCouponRedemption(final String accountCode, final String redemptionUuid) { } } | doDELETE(Accounts.ACCOUNTS_RESOURCE + "/" + accountCode + Redemption.REDEMPTIONS_RESOURCE + "/" + redemptionUuid); |
public class DockerClient { /** * MISC API */
public Info info() throws DockerException { } } | WebResource webResource = client.resource(restEndpointUrl + "/info"); try { LOGGER.trace("GET: {}", webResource); return webResource.accept(MediaType.APPLICATION_JSON).get(Info.class); } catch (UniformInterfaceException exception) { if (exception.getResponse().getStatus() == 500) { throw new DockerException("Server error.", exception); } else { throw new DockerException(exception); } } |
public class RepositoryApi { /** * Get a Stream of repository branches from a project, sorted by name alphabetically.
* <pre><code>GitLab Endpoint: GET /projects/:id/repository/branches</code></pre>
* @param projectIdOrPath the project in the form of an Integer (ID), String (path), or Project instance
* @return a Stream of repository branches for the specified project
* @throws GitLabApiException if any exception occurs */
public Stream<Branch> getBranchesStream(Object projectIdOrPath) throws GitLabApiException { } } | return (getBranches(projectIdOrPath, getDefaultPerPage()).stream()); |
public class RecordingListener { /** * Process the queued items. */
private void processQueue() { } } | CachedEvent cachedEvent; try { IRTMPEvent event = null; RTMPMessage message = null; // get first event in the queue
cachedEvent = queue.poll(); if (cachedEvent != null) { // get the data type
final byte dataType = cachedEvent.getDataType(); // get the data
IoBuffer buffer = cachedEvent.getData(); // get the current size of the buffer / data
int bufferLimit = buffer.limit(); if (bufferLimit > 0) { // create new RTMP message and push to the consumer
switch (dataType) { case Constants.TYPE_AGGREGATE: event = new Aggregate(buffer); event.setTimestamp(cachedEvent.getTimestamp()); message = RTMPMessage.build(event); break; case Constants.TYPE_AUDIO_DATA: event = new AudioData(buffer); event.setTimestamp(cachedEvent.getTimestamp()); message = RTMPMessage.build(event); break; case Constants.TYPE_VIDEO_DATA: event = new VideoData(buffer); event.setTimestamp(cachedEvent.getTimestamp()); message = RTMPMessage.build(event); break; default: event = new Notify(buffer); event.setTimestamp(cachedEvent.getTimestamp()); message = RTMPMessage.build(event); break; } // push it down to the recorder
recordingConsumer.pushMessage(null, message); } else if (bufferLimit == 0 && dataType == Constants.TYPE_AUDIO_DATA) { log.debug("Stream data size was 0, sending empty audio message"); // allow for 0 byte audio packets
event = new AudioData(IoBuffer.allocate(0)); event.setTimestamp(cachedEvent.getTimestamp()); message = RTMPMessage.build(event); // push it down to the recorder
recordingConsumer.pushMessage(null, message); } else { log.debug("Stream data size was 0, recording pipe will not be notified"); } } } catch (Exception e) { log.warn("Exception while pushing to consumer", e); } |
public class LeaderRole { /** * Commits a command.
* @param request the command request
* @param future the command response future */
private void commitCommand(CommandRequest request, CompletableFuture<CommandResponse> future) { } } | final long term = raft.getTerm(); final long timestamp = System.currentTimeMillis(); CommandEntry command = new CommandEntry(term, timestamp, request.session(), request.sequenceNumber(), request.operation()); appendAndCompact(command).whenCompleteAsync((entry, error) -> { if (error != null) { Throwable cause = Throwables.getRootCause(error); if (cause instanceof StorageException.TooLarge) { log.warn("Failed to append command {}", command, cause); future.complete(CommandResponse.builder().withStatus(RaftResponse.Status.ERROR).withError(RaftError.Type.PROTOCOL_ERROR).build()); } else { future.complete(CommandResponse.builder().withStatus(RaftResponse.Status.ERROR).withError(RaftError.Type.COMMAND_FAILURE).build()); } return; } // Replicate the command to followers.
appender.appendEntries(entry.index()).whenComplete((commitIndex, commitError) -> { raft.checkThread(); if (isRunning()) { // If the command was successfully committed, apply it to the state machine.
if (commitError == null) { raft.getServiceManager().<OperationResult>apply(entry.index()).whenComplete((r, e) -> { completeOperation(r, CommandResponse.builder(), e, future); }); } else { future.complete(CommandResponse.builder().withStatus(RaftResponse.Status.ERROR).withError(RaftError.Type.COMMAND_FAILURE).build()); } } else { future.complete(CommandResponse.builder().withStatus(RaftResponse.Status.ERROR).withError(RaftError.Type.COMMAND_FAILURE).build()); } }); }, raft.getThreadContext()); |
public class DescribeStackSummaryRequestMarshaller { /** * Marshall the given parameter object. */
public void marshall(DescribeStackSummaryRequest describeStackSummaryRequest, ProtocolMarshaller protocolMarshaller) { } } | if (describeStackSummaryRequest == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(describeStackSummaryRequest.getStackId(), STACKID_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } |
public class ImportSupport { /** * Strips a servlet session ID from <tt>url</tt>. The session ID
* is encoded as a URL "path parameter" beginning with "jsessionid=".
* We thus remove anything we find between ";jsessionid=" (inclusive)
* and either EOS or a subsequent ';' (exclusive). */
public static String stripSession(String url) { } } | StringBuffer u = new StringBuffer(url); int sessionStart; while ((sessionStart = u.toString().indexOf(";jsessionid=")) != -1) { int sessionEnd = u.toString().indexOf(";", sessionStart + 1); if (sessionEnd == -1) { sessionEnd = u.toString().indexOf("?", sessionStart + 1); } if (sessionEnd == -1) // still
{ sessionEnd = u.length(); } u.delete(sessionStart, sessionEnd); } return u.toString(); |
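Worked examples against the loop above, calling the method exactly as defined in the row:

public class StripSessionDemo {
    public static void main(String[] args) {
        // Session ID followed by a query string: removed up to the '?' (exclusive).
        System.out.println(ImportSupport.stripSession(
                "/shop/cart;jsessionid=ABC123?item=42")); // /shop/cart?item=42
        // Session ID at the end of the URL: removed up to end-of-string.
        System.out.println(ImportSupport.stripSession(
                "/shop/cart;jsessionid=ABC123"));         // /shop/cart
    }
}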
public class DBFField { /** * Creates a DBFField object from the data read from the given
* DataInputStream.
* The data in the DataInputStream object is supposed to be organised
* correctly and the stream "pointer" is supposed to be positioned properly.
* @param in DataInputStream
* @param charset charset to use
* @param useFieldFlags If the file can store field flags (setting this to false ignores any data in bytes 18-19)
* @return Returns the created DBFField object.
* @throws IOException If any stream reading problem occurs. */
protected static DBFField createField(DataInput in, Charset charset, boolean useFieldFlags) throws IOException { } } | DBFField field = new DBFField(); byte t_byte = in.readByte(); if (t_byte == (byte) 0x0d) { return null; } byte[] fieldName = new byte[11]; in.readFully(fieldName, 1, 10); /* 1-10 */
fieldName[0] = t_byte; int nameNullIndex = fieldName.length - 1; for (int i = 0; i < fieldName.length; i++) { if (fieldName[i] == (byte) 0) { nameNullIndex = i; break; } } field.name = new String(fieldName, 0, nameNullIndex, charset); try { field.type = DBFDataType.fromCode(in.readByte()); /* 11 */
} catch (Exception e) { field.type = DBFDataType.UNKNOWN; } field.reserv1 = DBFUtils.readLittleEndianInt(in); /* 12-15 */
field.length = in.readUnsignedByte(); /* 16 */
field.decimalCount = in.readByte(); /* 17 */
field.reserv2 = DBFUtils.readLittleEndianShort(in); /* 18-19 */
field.workAreaId = in.readByte(); /* 20 */
field.reserv3 = DBFUtils.readLittleEndianShort(in); /* 21-22 */
field.setFieldsFlag = in.readByte(); /* 23 */
in.readFully(field.reserv4); /* 24-30 */
field.indexFieldFlag = in.readByte(); /* 31 */
adjustLengthForLongCharSupport(field); if (!useFieldFlags) { field.reserv2 = 0; } return field; |
public class EpollServerSocketChannelConfig { /** * Set the {@code TCP_DEFER_ACCEPT} option on the socket. See {@code man 7 tcp} for more details. */
public EpollServerSocketChannelConfig setTcpDeferAccept(int deferAccept) { } } | try { ((EpollServerSocketChannel) channel).socket.setTcpDeferAccept(deferAccept); return this; } catch (IOException e) { throw new ChannelException(e); } |
public class CmsImageInfoDisplay { /** * Sets the crop format.<p>
* @param cropFormat the crop format */
public void setCropFormat(String cropFormat) { } } | boolean visible = (cropFormat != null); if (cropFormat == null) { cropFormat = ""; } m_labelCropFormat.setVisible(visible); m_removeCrop.setVisible(visible); m_displayCropFormat.setText(cropFormat); |
public class ZipFileContainer { /** * Deletes a file or directory tree; a single consolidated wrapper. */
@Trivial private boolean deleteAll(File rootFile) { } } | if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) { Tr.debug(tc, "Delete [ " + rootFile.getAbsolutePath() + " ]"); } if (FileUtils.fileIsFile(rootFile)) { boolean didDelete = FileUtils.fileDelete(rootFile); if (!didDelete) { Tr.error(tc, "Could not delete file [ " + rootFile.getAbsolutePath() + " ]"); } else { if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) { Tr.debug(tc, "Deleted"); } } return didDelete; } else { boolean didDeleteAll = true; int deleteCount = 0; File childFiles[] = FileUtils.listFiles(rootFile); int childCount; if (childFiles != null) { childCount = childFiles.length; for (File childFile : childFiles) { // Keep iterating even if one of the deletes fails.
// Delete as much as possible.
if (!deleteAll(childFile)) { didDeleteAll = false; } else { deleteCount++; } } } else { childCount = 0; deleteCount = 0; } if (didDeleteAll) { didDeleteAll = FileUtils.fileDelete(rootFile); } if (!didDeleteAll) { Tr.error(tc, "Could not delete directory [ " + rootFile.getAbsolutePath() + " ]"); } if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) { Tr.debug(tc, "Deleted [ " + Integer.valueOf(deleteCount) + " ]" + " of [ " + Integer.valueOf(childCount) + " ]"); } return didDeleteAll; } |
public class StorageSnippets { /** * [VARIABLE "my_unique_bucket"] */
public Acl updateDefaultBucketAcl(String bucketName) { } } | // [START updateDefaultBucketAcl]
Acl acl = storage.updateDefaultAcl(bucketName, Acl.of(User.ofAllAuthenticatedUsers(), Role.OWNER)); // [END updateDefaultBucketAcl]
return acl; |
public class GSFLWImpl { /** * <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated */
@Override public void eSet(int featureID, Object newValue) { } } | switch (featureID) { case AfplibPackage.GSFLW__MH: setMH((Integer) newValue); return; case AfplibPackage.GSFLW__MFR: setMFR((Integer) newValue); return; } super.eSet(featureID, newValue); |
public class Neighbour { /** * Gets the UUID for this Neighbour.
* @return SIBUuid */
public final SIBUuid8 getUUID() { } } | if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) { SibTr.entry(tc, "getUUID"); SibTr.exit(tc, "getUUID", iMEUuid); } return iMEUuid; |
public class RulePrunerPrinter { /** * Main runnable.
* @param args parameters used.
* @throws Exception if an error occurs. */
public static void main(String[] args) throws Exception { } } | try { RulePrunerParameters params = new RulePrunerParameters(); JCommander jct = new JCommander(params, args); if (0 == args.length) { jct.usage(); } else { // get params printed
StringBuffer sb = new StringBuffer(1024); sb.append("Rule pruner CLI v.1").append(CR); sb.append("parameters:").append(CR); sb.append(" input file: ").append(RulePrunerParameters.IN_FILE).append(CR); sb.append(" output file: ").append(RulePrunerParameters.OUT_FILE).append(CR); sb.append(" SAX num. reduction: ").append(RulePrunerParameters.SAX_NR_STRATEGY).append(CR); sb.append(" SAX norm. threshold: ").append(RulePrunerParameters.SAX_NORM_THRESHOLD).append(CR); sb.append(" GI Algorithm: ").append(RulePrunerParameters.GI_ALGORITHM_IMPLEMENTATION).append(CR); sb.append(" Grid boundaries: ").append(RulePrunerParameters.GRID_BOUNDARIES).append(CR); if (!(Double.isNaN(RulePrunerParameters.SUBSAMPLING_FRACTION))) { sb.append(" Subsampling fraction: ").append(RulePrunerParameters.SUBSAMPLING_FRACTION).append(CR); } // print out the params before starting
System.err.print(sb.toString()); // read the data in
String dataFName = RulePrunerParameters.IN_FILE; double[] ts = TSProcessor.readFileColumn(dataFName, 0, 0); if (!(Double.isNaN(RulePrunerParameters.SUBSAMPLING_FRACTION))) { ts = Arrays.copyOfRange(ts, 0, (int) Math.round((double) ts.length * RulePrunerParameters.SUBSAMPLING_FRACTION)); } // report the series length before starting
System.err.println(" working with series of " + ts.length + " points ... " + CR); // parse the boundaries params
int[] boundaries = toBoundaries(RulePrunerParameters.GRID_BOUNDARIES); // create the output file
BufferedWriter bw = new BufferedWriter(new FileWriter(new File(RulePrunerParameters.OUT_FILE))); bw.write(OUTPUT_HEADER); ArrayList<SampledPoint> res = new ArrayList<SampledPoint>(); // we need to use this in the loop
RulePruner rp = new RulePruner(ts); // iterate over the grid evaluating the grammar
for (int WINDOW_SIZE = boundaries[0]; WINDOW_SIZE < boundaries[1]; WINDOW_SIZE += boundaries[2]) { for (int PAA_SIZE = boundaries[3]; PAA_SIZE < boundaries[4]; PAA_SIZE += boundaries[5]) { // check for invalid cases
if (PAA_SIZE > WINDOW_SIZE) { continue; } for (int ALPHABET_SIZE = boundaries[6]; ALPHABET_SIZE < boundaries[7]; ALPHABET_SIZE += boundaries[8]) { SampledPoint p = rp.sample(WINDOW_SIZE, PAA_SIZE, ALPHABET_SIZE, RulePrunerParameters.GI_ALGORITHM_IMPLEMENTATION, RulePrunerParameters.SAX_NR_STRATEGY, RulePrunerParameters.SAX_NORM_THRESHOLD); bw.write(p.toLogString() + "\n"); res.add(p); } } } bw.close(); Collections.sort(res, new ReductionSorter()); System.out.println("\nApparently, the best parameters are " + res.get(0).toString()); } } catch (Exception e) { System.err.println("error occurred while parsing parameters " + Arrays.toString(args) + CR + StackTrace.toString(e)); System.exit(-1); } |
public class MarketplaceComment { /** * Sets the creationTime value for this MarketplaceComment.
* @param creationTime The creation {@link DateTime} of this {@code MarketplaceComment}. */
public void setCreationTime(com.google.api.ads.admanager.axis.v201805.DateTime creationTime) { } } | this.creationTime = creationTime; |
public class MultiEnvAware { /** * By default we don't try to resolve anything, but subclasses can extend this behavior if they'd like to try to
* resolve not-found values differently.
* @param sKey
* @param value
* @return */
protected T resolve(String sKey, T value) { } } | LOG.error("Fail to find environment [{}] in {}", sKey, this.keySet()); throw new MultiEnvSupportException(String.format("Fail to find configuration for environment %s", sKey)); |
public class RESTArtifactLoaderService { /** * Create response for browsing Maven repository.
* @param node the root node for browsing.
* @param mavenPath the Maven path, used for creating the &lt;a&gt; element.
* @return the response, see {@link Response}.
* @throws IOException if an I/O error occurs. */
private Response browseRepository(final Node node, final String mavenPath, final String gadget) throws IOException { } } | final PipedOutputStream po = new PipedOutputStream(); final PipedInputStream pi = new PipedInputStream(po); new Thread() { @Override @SuppressWarnings("unchecked") public void run() { try { XMLOutputFactory factory = XMLOutputFactory.newInstance(); // name spaces
factory.setProperty(XMLOutputFactory.IS_REPAIRING_NAMESPACES, Boolean.TRUE); XMLStreamWriter xsw = factory.createXMLStreamWriter(po, Constants.DEFAULT_ENCODING); xsw.writeStartDocument(Constants.DEFAULT_ENCODING, "1.0"); xsw.writeDTD("<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" " + "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">"); xsw.writeCharacters("\n"); if (gadget == null || !gadget.equalsIgnoreCase("true")) { xsw.writeStartElement("html"); xsw.writeDefaultNamespace(XHTML_NS); xsw.writeStartElement("head"); xsw.writeStartElement("style"); xsw.writeAttribute("type", "text/css"); xsw.writeCharacters("a {text-decoration: none; color: #10409C; }" + "a:hover {text-decoration: underline;}" + ".centered { text-align: center; }" + ".underlined { border-bottom : 1px solid #cccccc; font-weight: bold; text-align: center; }\n"); xsw.writeEndElement(); // style
xsw.writeStartElement("title"); xsw.writeCharacters("Maven2 Repository Browser"); xsw.writeEndElement(); // title
xsw.writeEndElement(); // head
} xsw.writeStartElement("body"); xsw.writeStartElement("h2"); xsw.writeAttribute("class", "centered"); xsw.writeCharacters("Maven2 Repository"); xsw.writeEndElement(); xsw.writeStartElement("table"); xsw.writeAttribute("width", "90%"); xsw.writeAttribute("style", "table-layout:fixed;"); // table header
xsw.writeStartElement("tr"); xsw.writeStartElement("td"); xsw.writeAttribute("class", "underlined"); xsw.writeAttribute("width", "7%"); xsw.writeEndElement(); // th
xsw.writeStartElement("td"); xsw.writeAttribute("class", "underlined"); xsw.writeCharacters("name"); xsw.writeEndElement(); // th
xsw.writeStartElement("td"); xsw.writeAttribute("class", "underlined"); xsw.writeAttribute("width", "18%"); xsw.writeCharacters("media-type"); xsw.writeEndElement(); // th
xsw.writeStartElement("td"); xsw.writeAttribute("class", "underlined"); xsw.writeAttribute("width", "15%"); xsw.writeCharacters("size"); xsw.writeEndElement(); // th
xsw.writeStartElement("td"); xsw.writeAttribute("class", "underlined"); xsw.writeAttribute("width", "18%"); xsw.writeCharacters("last modified"); xsw.writeEndElement(); // th
xsw.writeEndElement(); // tr
// end table header
// parent href
String parent = mavenPath.substring(0, mavenPath.lastIndexOf('/')); xsw.writeStartElement("td"); xsw.writeAttribute("class", "parenticon"); xsw.writeEndElement(); // td
xsw.writeStartElement("td"); xsw.writeStartElement("a"); xsw.writeAttribute("href", parent + "?view=true&gadget=" + gadget); xsw.writeCharacters(".."); xsw.writeEndElement(); xsw.writeEndElement(); // td
xsw.writeEmptyElement("td"); xsw.writeEmptyElement("td"); xsw.writeEmptyElement("td"); // NodeIterator nodes = node.getNodes();
EntityCollection nodes = (EntityCollection) node.getNodes(); Collections.sort(nodes.getList(), NODE_COMPARATOR); while (nodes.hasNext()) { Node node = nodes.nextNode(); xsw.writeStartElement("tr"); if (RESTArtifactLoaderService.isFile(node)) { if (node.getName().endsWith("sha1")) continue; NodeRepresentation nodeRepresentation = nodeRepresentationService.getNodeRepresentation(node, null); xsw.writeStartElement("td"); xsw.writeAttribute("class", "fileicon"); xsw.writeEndElement(); // td
xsw.writeStartElement("td"); xsw.writeAttribute("style", "font-style: italic;"); xsw.writeStartElement("a"); xsw.writeAttribute("href", (mavenPath.endsWith("/") ? mavenPath + node.getName() : mavenPath + "/" + node.getName()) + "?view=true&gadget=" + gadget); xsw.writeCharacters(node.getName()); xsw.writeEndElement(); xsw.writeEndElement(); // td
xsw.writeStartElement("td"); xsw.writeAttribute("class", "centered"); xsw.writeCharacters(nodeRepresentation.getMediaType()); xsw.writeEndElement(); // td
xsw.writeStartElement("td"); xsw.writeAttribute("class", "centered"); xsw.writeCharacters("" + nodeRepresentation.getContentLenght()); xsw.writeEndElement(); // td
xsw.writeStartElement("td"); xsw.writeAttribute("class", "centered"); xsw.writeCharacters(new Date(nodeRepresentation.getLastModified()).toString()); xsw.writeEndElement(); // td
} else { xsw.writeStartElement("td"); xsw.writeAttribute("class", "foldericon"); xsw.writeEndElement(); // td
xsw.writeStartElement("td"); xsw.writeStartElement("a"); xsw.writeAttribute("href", (mavenPath.endsWith("/") ? mavenPath + node.getName() : mavenPath + "/" + node.getName()) + "?view=true&gadget=" + gadget); xsw.writeCharacters(node.getName()); xsw.writeEndElement(); xsw.writeEndElement(); // td
xsw.writeStartElement("td"); xsw.writeAttribute("class", "centered"); xsw.writeCharacters("-"); xsw.writeEndElement(); // td
xsw.writeStartElement("td"); xsw.writeAttribute("class", "centered"); xsw.writeCharacters("-"); xsw.writeEndElement(); // td
xsw.writeStartElement("td"); xsw.writeAttribute("class", "centered"); xsw.writeCharacters("-"); xsw.writeEndElement(); // td
} xsw.writeEndElement(); // tr
} xsw.writeStartElement("tr"); xsw.writeEndElement(); xsw.writeEndElement(); // table
xsw.writeEndElement(); // body
if (gadget == null || !gadget.equalsIgnoreCase("true")) { xsw.writeEndElement(); // html
} xsw.writeEndDocument(); } catch (XMLStreamException xmle) { LOG.error(xmle.getLocalizedMessage(), xmle); } catch (RepositoryException re) { LOG.error(re.getLocalizedMessage(), re); } finally { try { po.flush(); po.close(); } catch (IOException e) { LOG.error(e.getLocalizedMessage(), e); } } } }.start(); // application/xhtml+xml content type is recommended for XHTML, but IE6
// doesn't support this.
return Response.ok(pi, "text/html").build(); |
public class CacheMetricsRegistrarConfiguration { /** * Get the name of a {@link CacheManager} based on its {@code beanName}.
* @param beanName the name of the {@link CacheManager} bean
* @return a name for the given cache manager */
private String getCacheManagerName(String beanName) { } } | if (beanName.length() > CACHE_MANAGER_SUFFIX.length() && StringUtils.endsWithIgnoreCase(beanName, CACHE_MANAGER_SUFFIX)) { return beanName.substring(0, beanName.length() - CACHE_MANAGER_SUFFIX.length()); } return beanName; |
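A quick walk-through, assuming CACHE_MANAGER_SUFFIX = "cacheManager" (the constant is not shown in the row) and the case-insensitive suffix match above:

//   "redisCacheManager" -> "redis"
//   "cacheManager"      -> "cacheManager" (the name must be strictly longer than the suffix)
//   "customCaches"      -> "customCaches" (no suffix, returned unchanged)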
public class CommercePriceListAccountRelLocalServiceBaseImpl { /** * Returns a range of commerce price list account rels matching the UUID and company.
* @param uuid the UUID of the commerce price list account rels
* @param companyId the primary key of the company
* @param start the lower bound of the range of commerce price list account rels
* @param end the upper bound of the range of commerce price list account rels (not inclusive)
* @param orderByComparator the comparator to order the results by (optionally <code>null</code>)
* @return the range of matching commerce price list account rels, or an empty list if no matches were found */
@Override public List<CommercePriceListAccountRel> getCommercePriceListAccountRelsByUuidAndCompanyId(String uuid, long companyId, int start, int end, OrderByComparator<CommercePriceListAccountRel> orderByComparator) { } } | return commercePriceListAccountRelPersistence.findByUuid_C(uuid, companyId, start, end, orderByComparator); |
public class NodeUtils { /** * Determines if the leaf represents a possible range of values (bounded or unbounded). */
public static boolean isRangeLeaf(Leaf leaf) { } } | return leaf instanceof RangeLeaf || leaf instanceof GreaterThanEqualsLeaf || leaf instanceof GreaterThanLeaf || leaf instanceof LessThanLeaf || leaf instanceof LessThanEqualsLeaf; |
public class U { /** * Documented, #once */
public static <T> Supplier<T> once(final Supplier<T> function) { } } | return new Supplier<T>() { private volatile boolean executed; private T result; @Override public synchronized T get() { // compute once under the lock (the original set `executed` before computing
// `result`, which could publish a null result to a racing caller), then
// serve the cached value on every later call
if (!executed) { result = function.get(); executed = true; } return result; } }; |
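Usage sketch for the memoizing wrapper, assuming the underscore-java U class with a java.util.function.Supplier-compatible signature (this varies by library version):

public class OnceDemo {
    public static void main(String[] args) {
        java.util.function.Supplier<Double> random = U.once(() -> Math.random());
        double first = random.get();  // the delegate runs exactly once
        double second = random.get(); // cached value, no second call
        System.out.println(first == second); // true
    }
}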
public class CompiledTranslator { /** * httl.properties: engine.name=httl.properties */
public void setEngineName(String name) { } } | if (HTTL_DEFAULT.equals(name)) { name = ""; } else { if (name.startsWith(HTTL_PREFIX)) { name = name.substring(HTTL_PREFIX.length()); } if (name.endsWith(PROPERTIES_SUFFIX)) { name = name.substring(0, name.length() - PROPERTIES_SUFFIX.length()); } } this.engineName = name; |
public class CalcPoint { /** * Center a cloud of points. This means subtracting the {@link
* #centroid(Point3d[])} of the cloud from each point.
* @param x array of points. Point objects will be modified */
public static void center(Point3d[] x) { } } | Point3d center = centroid(x); center.negate(); translate(new Vector3d(center), x); |
public class Database { public String[] get_class_attribute_list(String classname, String wildcard) throws DevFailed { } } | return databaseDAO.get_class_attribute_list(this, classname, wildcard); |
public class ChildWorkflowExecutionTimedOutEventAttributesMarshaller { /** * Marshall the given parameter object. */
public void marshall(ChildWorkflowExecutionTimedOutEventAttributes childWorkflowExecutionTimedOutEventAttributes, ProtocolMarshaller protocolMarshaller) { } } | if (childWorkflowExecutionTimedOutEventAttributes == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(childWorkflowExecutionTimedOutEventAttributes.getWorkflowExecution(), WORKFLOWEXECUTION_BINDING); protocolMarshaller.marshall(childWorkflowExecutionTimedOutEventAttributes.getWorkflowType(), WORKFLOWTYPE_BINDING); protocolMarshaller.marshall(childWorkflowExecutionTimedOutEventAttributes.getTimeoutType(), TIMEOUTTYPE_BINDING); protocolMarshaller.marshall(childWorkflowExecutionTimedOutEventAttributes.getInitiatedEventId(), INITIATEDEVENTID_BINDING); protocolMarshaller.marshall(childWorkflowExecutionTimedOutEventAttributes.getStartedEventId(), STARTEDEVENTID_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } |
public class Http100ContWriteCallback { /** * @see com.ibm.wsspi.tcpchannel.TCPWriteCompletedCallback#complete(com.ibm.wsspi.channelfw.VirtualConnection,
* com.ibm.wsspi.tcpchannel.TCPWriteRequestContext) */
@SuppressWarnings("unused") public void complete(VirtualConnection vc, TCPWriteRequestContext wsc) { } } | if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) { Tr.debug(tc, "complete() called for vc=" + vc); } HttpInboundLink link = (HttpInboundLink) vc.getStateMap().get(CallbackIDs.CALLBACK_HTTPICL); // we've written the 100 continue response, now we can call up to the
// app channels above us
// reset the values on the response message first
link.getHTTPContext().resetMsgSentState(); HttpResponseMessage msg = link.getHTTPContext().getResponse(); msg.setStatusCode(StatusCodes.OK); msg.removeHeader(HttpHeaderKeys.HDR_CONTENT_LENGTH); link.handleDiscrimination(); |
public class Utils { /** * Get the identifier for the resource with a given type and key. */
private static int getIdentifier(Context context, String type, String key) { } } | return context.getResources().getIdentifier(key, type, context.getPackageName()); |
public class Popups { /** * Displays an info message below the specified widget. */
public static InfoPopup infoBelow(String message, Widget target) { } } | return info(message, Position.BELOW, target); |
public class MenuUtil { /** * Adds a new menu item to the menu with the specified name and
* attributes.
* @param l the action listener.
* @param menu the menu to add the item to.
* @param name the item name.
* @param mnem the mnemonic key for the item or null if none.
* @param accel the keystroke for the item or null if none.
* @return the new menu item. */
public static JMenuItem addMenuItem(ActionListener l, JMenu menu, String name, Integer mnem, KeyStroke accel) { } } | JMenuItem item = createItem(name, mnem, accel); item.addActionListener(l); menu.add(item); return item; |
public class AbstractHistogram { /** * Shift recorded values to the left (the equivalent of a &lt;&lt; shift operation on all recorded values). The
* configured integer value range limits and value precision setting will remain unchanged.
* An {@link ArrayIndexOutOfBoundsException} will be thrown if any recorded values may be lost
* as a result of the attempted operation, reflecting an "overflow" condition. Expect such an overflow
* exception if the operation would cause the current maxValue to be scaled to a value that is outside
* of the covered value range.
* @param numberOfBinaryOrdersOfMagnitude The number of binary orders of magnitude to shift by */
public void shiftValuesLeft(final int numberOfBinaryOrdersOfMagnitude) { } } | if (numberOfBinaryOrdersOfMagnitude < 0) { throw new IllegalArgumentException("Cannot shift by a negative number of magnitudes"); } if (numberOfBinaryOrdersOfMagnitude == 0) { return; } if (getTotalCount() == getCountAtIndex(0)) { // (no need to shift any values if all recorded values are at the 0 value level:)
return; } final int shiftAmount = numberOfBinaryOrdersOfMagnitude << subBucketHalfCountMagnitude; int maxValueIndex = countsArrayIndex(getMaxValue()); // indicate overflow if maxValue is in the range being wrapped:
if (maxValueIndex >= (countsArrayLength - shiftAmount)) { throw new ArrayIndexOutOfBoundsException("Operation would overflow, would discard recorded value counts"); } long maxValueBeforeShift = maxValueUpdater.getAndSet(this, 0); long minNonZeroValueBeforeShift = minNonZeroValueUpdater.getAndSet(this, Long.MAX_VALUE); boolean lowestHalfBucketPopulated = (minNonZeroValueBeforeShift < subBucketHalfCount); // Perform the shift:
shiftNormalizingIndexByOffset(shiftAmount, lowestHalfBucketPopulated); // adjust min, max:
updateMinAndMax(maxValueBeforeShift << numberOfBinaryOrdersOfMagnitude); if (minNonZeroValueBeforeShift < Long.MAX_VALUE) { updateMinAndMax(minNonZeroValueBeforeShift << numberOfBinaryOrdersOfMagnitude); } |
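Usage sketch, assuming the HdrHistogram Histogram subclass that exposes this method:

import org.HdrHistogram.Histogram;

public class ShiftDemo {
    public static void main(String[] args) {
        Histogram h = new Histogram(3); // 3 significant decimal digits
        h.recordValue(1000);
        h.shiftValuesLeft(1); // multiply every recorded value by 2^1
        System.out.println(h.getMaxValue()); // ~2000, within the configured precision
    }
}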
public class SRTServletRequestThreadData { /** * Save the state of the parameters before a call to include or forward. */
public void pushParameterStack(Map parameters) { } } | if (parameters == null) { _paramStack.push(null); } else { _paramStack.push(((Hashtable) parameters).clone()); } |
public class ScriptActionsInner { /** * Gets the script execution detail for the given script execution ID.
* @param resourceGroupName The name of the resource group.
* @param clusterName The name of the cluster.
* @param scriptExecutionId The script execution Id.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the observable to the RuntimeScriptActionDetailInner object */
public Observable<RuntimeScriptActionDetailInner> getExecutionDetailAsync(String resourceGroupName, String clusterName, String scriptExecutionId) { } } | return getExecutionDetailWithServiceResponseAsync(resourceGroupName, clusterName, scriptExecutionId).map(new Func1<ServiceResponse<RuntimeScriptActionDetailInner>, RuntimeScriptActionDetailInner>() { @Override public RuntimeScriptActionDetailInner call(ServiceResponse<RuntimeScriptActionDetailInner> response) { return response.body(); } }); |
public class ShanksAgentBayesianReasoningCapability { /** * Add soft evidence to the Bayesian network to reason with it.
* @param bn
* @param nodeName
* @param softEvidence
* @throws ShanksException */
public static void addSoftEvidence(Network bn, String nodeName, HashMap<String, Double> softEvidence) throws ShanksException { } } | String auxNodeName = softEvidenceNodePrefix + nodeName; int targetNode = bn.getNode(nodeName); boolean found = false; int[] children = bn.getChildren(targetNode); for (int child : children) { if (bn.getNodeName(child).equals(auxNodeName)) { if (bn.getOutcomeCount(child) == 2 && bn.getOutcomeId(child, 0).equals(triggerState)) { found = true; break; } } } if (!found) { // Create soft-evidence node
bn.addNode(NodeType.Cpt, auxNodeName); bn.setNodeName(auxNodeName, auxNodeName); int node = bn.getNode(auxNodeName); bn.setOutcomeId(node, 0, triggerState); bn.setOutcomeId(node, 1, "NON" + triggerState); // bn.insertOutcome(auxNode, 0, triggerState);
// bn.insertOutcome(auxNode, 1, "NON" + triggerState);
double[] cpt = bn.getNodeDefinition(auxNodeName); for (int i = 0; i < cpt.length; i++) { cpt[i] = (float) 0.5; } bn.addArc(targetNode, bn.getNode(auxNodeName)); cpt = bn.getNodeDefinition(auxNodeName); for (int i = 0; i < cpt.length; i++) { cpt[i] = (float) 0.5; } } ShanksAgentBayesianReasoningCapability.updateSoftEvidenceAuxiliaryNodeCPT(bn, nodeName, softEvidence); ShanksAgentBayesianReasoningCapability.addEvidence(bn, softEvidenceNodePrefix + nodeName, triggerState); |
public class DatastoreSnippets { /** * [TARGET allocateId(IncompleteKey)] */
public Key allocateIdSingle() { } } | // [START allocateIdSingle]
KeyFactory keyFactory = datastore.newKeyFactory().setKind("MyKind"); IncompleteKey incompleteKey = keyFactory.newKey(); // let cloud datastore automatically assign an id
Key key = datastore.allocateId(incompleteKey); // [END allocateIdSingle]
return key; |
public class BigQueryOutputConfiguration { /** * Gets a configured instance of the stored {@link FileOutputFormat} in the configuration.
* @param conf the configuration to reference the keys from.
* @return a configured instance of the stored {@link FileOutputFormat} in the configuration.
* @throws IOException if there's an issue getting an instance of a FileOutputFormat from the
* configuration. */
@SuppressWarnings("rawtypes") public static FileOutputFormat getFileOutputFormat(Configuration conf) throws IOException { } } | // Ensure the BigQuery output information is valid.
ConfigurationUtil.getMandatoryConfig(conf, BigQueryConfiguration.OUTPUT_FORMAT_CLASS_KEY); Class<?> confClass = conf.getClass(BigQueryConfiguration.OUTPUT_FORMAT_CLASS_KEY, null); // Fail if the default value was used, or the class isn't a FileOutputFormat.
if (confClass == null) { throw new IOException("Unable to resolve value for the configuration key '" + BigQueryConfiguration.OUTPUT_FORMAT_CLASS_KEY + "'."); } else if (!FileOutputFormat.class.isAssignableFrom(confClass)) { throw new IOException("The class " + confClass.getName() + " is not a FileOutputFormat."); } Class<? extends FileOutputFormat> fileOutputClass = confClass.asSubclass(FileOutputFormat.class); // Create a new instance and configure it if it's configurable.
return ReflectionUtils.newInstance(fileOutputClass, conf); |
public class MtasDataItemAdvanced { /** * (non-Javadoc)
* @see mtas.codec.util.collector.MtasDataItem#getCompareValueType() */
@Override public int getCompareValueType() throws IOException { } } | switch (sortType) { case CodecUtil.STATS_TYPE_N: return 0; case CodecUtil.STATS_TYPE_SUM: return 1; case CodecUtil.STATS_TYPE_MAX: return 1; case CodecUtil.STATS_TYPE_MIN: return 1; case CodecUtil.STATS_TYPE_SUMSQ: return 1; case CodecUtil.STATS_TYPE_SUMOFLOGS: return 2; case CodecUtil.STATS_TYPE_MEAN: return 2; case CodecUtil.STATS_TYPE_GEOMETRICMEAN: return 2; case CodecUtil.STATS_TYPE_STANDARDDEVIATION: return 2; case CodecUtil.STATS_TYPE_VARIANCE: return 2; case CodecUtil.STATS_TYPE_POPULATIONVARIANCE: return 2; case CodecUtil.STATS_TYPE_QUADRATICMEAN: return 2; default: throw new IOException("sortType " + sortType + " not supported"); } |
public class ListItemBox { /** * Draws a bullet or text marker. */
protected void drawBullet(Graphics2D g) { } } | ctx.updateGraphics(g); int x = (int) Math.round(getAbsoluteContentX() - 1.2 * ctx.getEm()); int y = (int) Math.round(getAbsoluteContentY() + 0.5 * ctx.getEm()); int r = (int) Math.round(0.4 * ctx.getEm()); if (styleType == CSSProperty.ListStyleType.CIRCLE) g.drawOval(x, y, r, r); else if (styleType == CSSProperty.ListStyleType.SQUARE) g.fillRect(x, y, r, r); // else if (type == CSSProperty.ListStyleType.BOX) // not documented, recognized by Konqueror
// g.drawRect(x, y, r, r);
else if (styleType == CSSProperty.ListStyleType.DISC) g.fillOval(x, y, r, r); else if (styleType != CSSProperty.ListStyleType.NONE) drawText(g, getMarkerText()); |
public class StringEntityRepository { /** * Replaces all string children: removes any children which are not in the list, adds the
* new ones, and updates those which are in the list.
* @param strings string children list to replace. */
public void replaceStringChildren(List<String> strings) { } } | ArrayList<StringEntity> entities = new ArrayList<>(); for (String string : strings) { StringEntity entity = new StringEntity(); entity.setValue(string); entities.add(entity); } replaceAll(entities); |
public class CmsPreviewDialog { /** * Initializes the locale selector if needed.<p>
* @param previewInfo the preview info */
private void initLocales(CmsPreviewInfo previewInfo) { } } | if (m_localeSelect != null) { removeButton(m_localeSelect); m_localeSelect = null; } if (previewInfo.hasAdditionalLocales()) { m_localeSelect = new CmsSelectBox(previewInfo.getLocales()); m_localeSelect.setFormValueAsString(previewInfo.getLocale()); m_localeSelect.addValueChangeHandler(new ValueChangeHandler<String>() { public void onValueChange(ValueChangeEvent<String> event) { loadOtherLocale(event.getValue()); } }); Style style = m_localeSelect.getElement().getStyle(); style.setWidth(SELECTBOX_WIDTH, Unit.PX); style.setFloat(com.google.gwt.dom.client.Style.Float.LEFT); style.setMargin(0, Unit.PX); m_localeSelect.truncate(TRUNCATION_PREFIX, SELECTBOX_WIDTH - 20); addButton(m_localeSelect); } |
public class Headers { /** * Get charset set in content type header .
* @ return the charset , or defaultCharset if no charset is set . */
public Charset getCharset ( Charset defaultCharset ) { } } | String contentType = getHeader ( HttpHeaders . NAME_CONTENT_TYPE ) ; if ( contentType == null ) { return defaultCharset ; } String [ ] items = contentType . split ( ";" ) ; for ( String item : items ) { item = item . trim ( ) ; if ( item . isEmpty ( ) ) { continue ; } int idx = item . indexOf ( '=' ) ; if ( idx < 0 ) { continue ; } String key = item . substring ( 0 , idx ) . trim ( ) ; if ( key . equalsIgnoreCase ( "charset" ) ) { try { return Charset . forName ( item . substring ( idx + 1 ) . trim ( ) ) ; } catch ( IllegalCharsetNameException | UnsupportedCharsetException e ) { return defaultCharset ; } } } return defaultCharset ; |
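A short usage sketch ; the headers objects are hypothetical , while the outcomes follow the parsing loop above :

    // "Content-Type: text/html; charset=UTF-8" — the value is split on ';',
    // each item is trimmed, and the "charset" key is matched case-insensitively.
    java.nio.charset.Charset cs =
            headers.getCharset(java.nio.charset.StandardCharsets.ISO_8859_1); // UTF-8
    // A missing header, or an illegal/unsupported charset name, yields the default.
    java.nio.charset.Charset fb =
            headersWithoutType.getCharset(java.nio.charset.StandardCharsets.ISO_8859_1); // ISO-8859-1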
public class Query { /** * < code >
* Add a value refinement . Takes a refinement name , a value , and whether or not to exclude this refinement .
* < / code >
* @ param navigationName
* The name of the navigation
* @ param value
* The refinement value
* @ param exclude
* True if the results should exclude this value refinement , false otherwise */
public Query addValueRefinement ( String navigationName , String value , boolean exclude ) { } } | return addRefinement ( navigationName , new RefinementValue ( ) . setValue ( value ) . setExclude ( exclude ) ) ; |
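A hedged usage sketch ; the navigation names are invented , and chaining assumes addRefinement returns this Query , as the return statement above suggests :

    Query query = new Query();
    // Keep results whose "brand" is "Acme"; exclude anything in "clearance".
    query.addValueRefinement("brand", "Acme", false)
         .addValueRefinement("category", "clearance", true);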
public class TemplateAstMatcher { /** * Creates a template parameter or string literal template node . */
private Node createTemplateParameterNode ( int index , JSType type , boolean isStringLiteral ) { } } | checkState ( index >= 0 ) ; checkNotNull ( type ) ; Node n = Node . newNumber ( index ) ; if ( isStringLiteral ) { n . setToken ( TEMPLATE_STRING_LITERAL ) ; } else { n . setToken ( TEMPLATE_TYPE_PARAM ) ; } n . setJSType ( type ) ; return n ; |
public class SchemaStoreItemStream { /** * Remove an item from our index */
void removeFromIndex ( SchemaStoreItem item ) { } } | schemaIndex . remove ( item . getSchema ( ) . getLongID ( ) ) ; item . setStream ( null ) ; |
public class JBBPSafeInstantiator { /** * Find a constructor for an inner class .
* @ param klazz a class to find a constructor , must not be null
* @ param declaringClass the declaring class for the class , must not be null
* @ return found constructor to be used to make an instance */
private static Constructor < ? > findConstructorForInnerClass ( final Class < ? > klazz , final Class < ? > declaringClass ) { } } | final Constructor < ? > [ ] constructors = klazz . getDeclaredConstructors ( ) ; if ( constructors . length == 1 ) { return constructors [ 0 ] ; } for ( final Constructor < ? > c : constructors ) { final Class < ? > [ ] params = c . getParameterTypes ( ) ; if ( params . length == 1 && params [ 0 ] == declaringClass ) { return c ; } } return constructors [ 0 ] ; |
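Why the single-parameter check works : the compiler gives a non-static inner class a synthetic constructor parameter of the enclosing type . A minimal illustration with hypothetical classes :

    public class Outer {
        public class Inner { }  // javac generates the constructor Inner(Outer)
    }
    // findConstructorForInnerClass(Outer.Inner.class, Outer.class) matches the
    // generated Inner(Outer) constructor via params.length == 1 &&
    // params[0] == declaringClass; otherwise it falls back to the first
    // declared constructor.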
public class ElasticSearchRestDAOV5 { /** * Initializes the index with the required templates and mappings . */
private void initIndex ( ) throws Exception { } } | // 0 . Add the tasklog template
if ( doesResourceNotExist ( "/_template/tasklog_template" ) ) { logger . info ( "Creating the index template 'tasklog_template'" ) ; InputStream stream = ElasticSearchDAOV5 . class . getResourceAsStream ( "/template_tasklog.json" ) ; byte [ ] templateSource = IOUtils . toByteArray ( stream ) ; HttpEntity entity = new NByteArrayEntity ( templateSource , ContentType . APPLICATION_JSON ) ; try { elasticSearchAdminClient . performRequest ( HttpMethod . PUT , "/_template/tasklog_template" , Collections . emptyMap ( ) , entity ) ; } catch ( IOException e ) { logger . error ( "Failed to initialize tasklog_template" , e ) ; } } |
public class Pac4jPrincipal { /** * Returns a name for the principal based upon one of the attributes
* of the main CommonProfile . The attribute name used to query the CommonProfile
* is specified in the constructor .
* @ return a name for the Principal or null if the attribute is not populated . */
@ Override public String getName ( ) { } } | CommonProfile profile = this . getProfile ( ) ; if ( null == principalNameAttribute ) { return profile . getId ( ) ; } Object attrValue = profile . getAttribute ( principalNameAttribute ) ; return ( null == attrValue ) ? null : String . valueOf ( attrValue ) ; |
public class TopologyInfo { /** * Split totalOwnedSegments segments into the given locations recursively .
* @ param locations List of locations of the same level , sorted descending by capacity factor */
private void splitExpectedOwnedSegments ( Collection < ? extends Location > locations , float totalOwnedSegments , float totalCapacity ) { } } | float remainingCapacity = totalCapacity ; float remainingOwned = totalOwnedSegments ; // First pass , assign expected owned segments for locations with too little capacity
// We know we can do it without a loop because locations are ordered descending by capacity
List < Location > remainingLocations = new ArrayList < > ( locations ) ; for ( ListIterator < Location > it = remainingLocations . listIterator ( locations . size ( ) ) ; it . hasPrevious ( ) ; ) { Location location = it . previous ( ) ; if ( remainingOwned < numSegments * remainingLocations . size ( ) ) break ; // We don ' t have enough locations , so each location must own at least numSegments segments
int minOwned = numSegments ; float locationOwned = remainingOwned * location . totalCapacity / remainingCapacity ; if ( locationOwned > minOwned ) break ; splitExpectedOwnedSegments2 ( location . getChildren ( ) , minOwned , location . totalCapacity ) ; remainingCapacity -= location . totalCapacity ; remainingOwned -= location . expectedOwnedSegments ; it . remove ( ) ; } // Second pass , assign expected owned segments for locations with too much capacity
// We know we can do it without a loop because locations are ordered descending by capacity
for ( Iterator < ? extends Location > it = remainingLocations . iterator ( ) ; it . hasNext ( ) ; ) { Location location = it . next ( ) ; float maxOwned = computeMaxOwned ( remainingOwned , remainingLocations . size ( ) ) ; float locationOwned = remainingOwned * location . totalCapacity / remainingCapacity ; if ( locationOwned < maxOwned ) break ; splitExpectedOwnedSegments2 ( location . getChildren ( ) , maxOwned , location . totalCapacity ) ; remainingCapacity -= location . totalCapacity ; remainingOwned -= maxOwned ; it . remove ( ) ; } // If there were exactly numSegments segments per location , we ' re finished here
if ( remainingLocations . isEmpty ( ) ) return ; // Third pass : If more than numSegments segments per location , split segments between their children
// Else spread remaining segments based only on the capacity , rounding down
if ( remainingLocations . size ( ) * numSegments < remainingOwned ) { List < Location > childrenLocations = new ArrayList < > ( remainingLocations . size ( ) * 2 ) ; for ( Location location : remainingLocations ) { childrenLocations . addAll ( location . getChildren ( ) ) ; } Collections . sort ( childrenLocations ) ; splitExpectedOwnedSegments2 ( childrenLocations , remainingOwned , remainingCapacity ) ; } else { // The allocation algorithm can assign more segments to nodes , so it ' s ok to miss some segments here
float fraction = remainingOwned / remainingCapacity ; for ( Location location : remainingLocations ) { float locationOwned = location . totalCapacity * fraction ; splitExpectedOwnedSegments2 ( location . getChildren ( ) , locationOwned , location . totalCapacity ) ; } } |
public class AbcGrammar { /** * abc - tune : : = abc - header abc - music eol */
public Rule AbcTune ( ) { } } | return Sequence ( AbcHeader ( ) , AbcMusic ( ) , FirstOf ( WhiteLines ( ) , Eols ( ) , EOI ) . suppressNode ( ) ) . label ( AbcTune ) ; |
public class MoreQueues { /** * A last-in-first-out stack backed by an ArrayDeque , with its ordering adapted via Collections # asLifoQueue ( )
* An initial capacity must be given ( the default is 16 ) ; the backing array doubles in size when full
* @ see Collections # asLifoQueue ( ) */
public static < E > Queue < E > createStack ( int initSize ) { } } | return Collections . asLifoQueue ( new ArrayDeque < E > ( initSize ) ) ; |
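A quick demonstration of the resulting LIFO ordering , assuming the class and method above :

    java.util.Queue<String> stack = MoreQueues.createStack(16);
    stack.offer("first");
    stack.offer("second");
    // Collections.asLifoQueue maps offer/poll onto push/pop semantics:
    String top = stack.poll(); // "second" comes out before "first"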
public class SocketChannelStream { /** * Initialize the SocketChannelStream with a new SocketChannel .
* @ param s the new socket channel . */
public void init ( SocketChannel s ) { } } | _s = s ; try { s . setOption ( StandardSocketOptions . TCP_NODELAY , true ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; } // _ is = null ;
// _ os = null ;
_needsFlush = false ; _readBuffer . clear ( ) . flip ( ) ; _writeBuffer . clear ( ) ; |
public class DocumentRevisionTree { /** * < p > Returns the child with a given revision ID of a parent
* { @ link InternalDocumentRevision } . < / p >
* @ param parentNode parent { @ code DocumentRevision }
* @ param childRevision revision to look for in child nodes
* @ return the child with a given revision ID of a { @ code DocumentRevision } . */
public InternalDocumentRevision lookupChildByRevId ( InternalDocumentRevision parentNode , String childRevision ) { } } | Misc . checkNotNull ( parentNode , "Parent node" ) ; Misc . checkArgument ( sequenceMap . containsKey ( parentNode . getSequence ( ) ) , "The given parent DocumentRevision must be in the tree." ) ; DocumentRevisionNode p = sequenceMap . get ( parentNode . getSequence ( ) ) ; Iterator i = p . iterateChildren ( ) ; while ( i . hasNext ( ) ) { DocumentRevisionNode n = ( DocumentRevisionNode ) i . next ( ) ; if ( n . getData ( ) . getRevision ( ) . equals ( childRevision ) ) { return n . getData ( ) ; } } return null ; |
public class AbstractClientProvider { /** * Initialize the service discovery client ; we will reuse it
* every time we need to create a new client . */
private void initDiscovery ( ) { } } | if ( discoveryServiceClient == null ) { LOG . info ( "No DiscoveryServiceClient provided. Skipping service discovery." ) ; return ; } endpointStrategy = new TimeLimitEndpointStrategy ( new RandomEndpointStrategy ( discoveryServiceClient . discover ( configuration . get ( TxConstants . Service . CFG_DATA_TX_DISCOVERY_SERVICE_NAME , TxConstants . Service . DEFAULT_DATA_TX_DISCOVERY_SERVICE_NAME ) ) ) , 2 , TimeUnit . SECONDS ) ; |
public class DefaultPassConfig { /** * Create a compiler pass that runs the given passes in serial . */
private static CompilerPass runInSerial ( final Collection < CompilerPass > passes ) { } } | return new CompilerPass ( ) { @ Override public void process ( Node externs , Node root ) { for ( CompilerPass pass : passes ) { pass . process ( externs , root ) ; } } } ; |
public class AmazonS3Client { /** * Copies a source object to a part of a multipart upload .
* To copy an object , the caller ' s account must have read access to the source object and
* write access to the destination bucket .
* If constraints are specified in the < code > CopyPartRequest < / code >
* ( e . g .
* { @ link CopyPartRequest # setMatchingETagConstraints ( List ) } )
* and are not satisfied when Amazon S3 receives the
* request , this method returns < code > null < / code > .
* This method returns a non - null result under all other
* circumstances .
* @ param copyPartRequest
* The request object containing all the options for copying an
* Amazon S3 object .
* @ return A { @ link CopyPartResult } object containing the information
* returned by Amazon S3 about the newly created object , or < code > null < / code > if
* constraints were specified that weren ' t met when Amazon S3 attempted
* to copy the object .
* @ throws SdkClientException
* If any errors are encountered in the client while making the
* request or handling the response .
* @ throws AmazonServiceException
* If any errors occurred in Amazon S3 while processing the
* request .
* @ see AmazonS3 # copyObject ( CopyObjectRequest )
* @ see AmazonS3 # initiateMultipartUpload ( InitiateMultipartUploadRequest ) */
@ Override public CopyPartResult copyPart ( CopyPartRequest copyPartRequest ) { } } | copyPartRequest = beforeClientExecution ( copyPartRequest ) ; rejectNull ( copyPartRequest . getSourceBucketName ( ) , "The source bucket name must be specified when copying a part" ) ; rejectNull ( copyPartRequest . getSourceKey ( ) , "The source object key must be specified when copying a part" ) ; rejectNull ( copyPartRequest . getDestinationBucketName ( ) , "The destination bucket name must be specified when copying a part" ) ; rejectNull ( copyPartRequest . getUploadId ( ) , "The upload id must be specified when copying a part" ) ; rejectNull ( copyPartRequest . getDestinationKey ( ) , "The destination object key must be specified when copying a part" ) ; rejectNull ( copyPartRequest . getPartNumber ( ) , "The part number must be specified when copying a part" ) ; String destinationKey = copyPartRequest . getDestinationKey ( ) ; String destinationBucketName = copyPartRequest . getDestinationBucketName ( ) ; Request < CopyPartRequest > request = createRequest ( destinationBucketName , destinationKey , copyPartRequest , HttpMethodName . PUT ) ; request . addHandlerContext ( HandlerContextKey . OPERATION_NAME , "UploadPartCopy" ) ; populateRequestWithCopyPartParameters ( request , copyPartRequest ) ; request . addParameter ( "uploadId" , copyPartRequest . getUploadId ( ) ) ; request . addParameter ( "partNumber" , Integer . toString ( copyPartRequest . getPartNumber ( ) ) ) ; populateRequesterPaysHeader ( request , copyPartRequest . isRequesterPays ( ) ) ; /* * We can ' t send a non - zero length Content - Length header if the user
* specified it , otherwise it messes up the HTTP connection when the
* remote server thinks there ' s more data to pull . */
setZeroContentLength ( request ) ; CopyObjectResultHandler copyObjectResultHandler = null ; try { @ SuppressWarnings ( "unchecked" ) ResponseHeaderHandlerChain < CopyObjectResultHandler > handler = new ResponseHeaderHandlerChain < CopyObjectResultHandler > ( // xml payload unmarshaller
new Unmarshallers . CopyObjectUnmarshaller ( ) , // header handlers
new ServerSideEncryptionHeaderHandler < CopyObjectResultHandler > ( ) , new S3VersionHeaderHandler < CopyObjectResultHandler > ( ) ) ; copyObjectResultHandler = invoke ( request , handler , destinationBucketName , destinationKey ) ; } catch ( AmazonS3Exception ase ) { /* * If the request failed because one of the specified constraints
* was not met ( ex : matching ETag , modified since date , etc . ) , then
* return null , so that users don ' t have to wrap their code in
* try / catch blocks and check for this status code if they want to
* use constraints . */
if ( ase . getStatusCode ( ) == Constants . FAILED_PRECONDITION_STATUS_CODE ) { return null ; } throw ase ; } /* * CopyPart has two failure modes : 1 - An HTTP error code is returned
* and the error is processed like any other error response . 2 - An HTTP
* 200 OK code is returned , but the response content contains an XML
* error response .
* This makes it very difficult for the client runtime to cleanly detect
* this case and handle it like any other error response . We could
* extend the runtime to have a more flexible / customizable definition of
* success / error ( per request ) , but it ' s probably overkill for this one
* special case . */
if ( copyObjectResultHandler . getErrorCode ( ) != null ) { String errorCode = copyObjectResultHandler . getErrorCode ( ) ; String errorMessage = copyObjectResultHandler . getErrorMessage ( ) ; String requestId = copyObjectResultHandler . getErrorRequestId ( ) ; String hostId = copyObjectResultHandler . getErrorHostId ( ) ; AmazonS3Exception ase = new AmazonS3Exception ( errorMessage ) ; ase . setErrorCode ( errorCode ) ; ase . setErrorType ( ErrorType . Service ) ; ase . setRequestId ( requestId ) ; ase . setExtendedRequestId ( hostId ) ; ase . setServiceName ( request . getServiceName ( ) ) ; ase . setStatusCode ( 200 ) ; throw ase ; } CopyPartResult copyPartResult = new CopyPartResult ( ) ; copyPartResult . setETag ( copyObjectResultHandler . getETag ( ) ) ; copyPartResult . setPartNumber ( copyPartRequest . getPartNumber ( ) ) ; copyPartResult . setLastModifiedDate ( copyObjectResultHandler . getLastModified ( ) ) ; copyPartResult . setVersionId ( copyObjectResultHandler . getVersionId ( ) ) ; copyPartResult . setSSEAlgorithm ( copyObjectResultHandler . getSSEAlgorithm ( ) ) ; copyPartResult . setSSECustomerAlgorithm ( copyObjectResultHandler . getSSECustomerAlgorithm ( ) ) ; copyPartResult . setSSECustomerKeyMd5 ( copyObjectResultHandler . getSSECustomerKeyMd5 ( ) ) ; return copyPartResult ; |
public class GenerateConfigMojo { /** * Creates a scanner directive from the artifact , to be parsed by Pax Runner .
* Also includes options found and matched in the settings part of the configuration .
* @ param artifact to be used to create scanner directive .
* @ param optionTokens to be used to create scanner directive .
* @ return pax runner compatible scanner directive . */
private String createPaxRunnerScan ( Artifact artifact , String optionTokens ) { } } | return "scan-bundle:" + artifact . getFile ( ) . toURI ( ) . normalize ( ) . toString ( ) + "@update" + optionTokens ; |
public class ManifestElementReader { /** * This method binds a ChunksManifest xml document to a ChunksManifest
* object
* @ param doc ChunksManifest xml document
* @ return ChunksManifest object */
public static ChunksManifest createManifestFrom ( ChunksManifestDocument doc ) { } } | ChunksManifestType manifestType = doc . getChunksManifest ( ) ; HeaderType headerType = manifestType . getHeader ( ) ; ChunksManifest . ManifestHeader header = createHeaderFromElement ( headerType ) ; ChunksType chunksType = manifestType . getChunks ( ) ; List < ChunksManifestBean . ManifestEntry > entries = createEntriesFromElement ( chunksType ) ; ChunksManifestBean manifestBean = new ChunksManifestBean ( ) ; manifestBean . setHeader ( header ) ; manifestBean . setEntries ( entries ) ; return new ChunksManifest ( manifestBean ) ; |
public class CurrencyDateCalculatorBuilder { /** * If brokenDate is not allowed , we need to check the WorkingWeek and Holiday for the crossCcy when
* validating the SpotDate or a Tenor date .
* @ param crossCcyCalendar the set of holidays for the crossCcy
* @ return the builder */
public CurrencyDateCalculatorBuilder < E > crossCcyCalendar ( final HolidayCalendar < E > crossCcyCalendar ) { } } | if ( crossCcyCalendar != null ) { this . crossCcyCalendar = crossCcyCalendar ; } return this ; |
public class SqlParserImpl { /** * Parses an IF statement . */
protected void parseIf ( ) { } } | String condition = tokenizer . getToken ( ) . substring ( 2 ) ; if ( StringUtils . isBlank ( condition ) ) { throw new IfConditionNotFoundRuntimeException ( ) ; } IfNode ifNode = new IfNode ( Math . max ( this . position - 2 , 0 ) , condition ) ; this . position = this . tokenizer . getPosition ( ) ; peek ( ) . addChild ( ifNode ) ; push ( ifNode ) ; parseEnd ( ) ; |
public class CustomField { /** * Gets the dataType value for this CustomField .
* @ return dataType * The type of data this custom field contains . This attribute
* is read - only
* if there exists a { @ link CustomFieldValue } for this
* field . */
public com . google . api . ads . admanager . axis . v201902 . CustomFieldDataType getDataType ( ) { } } | return dataType ; |
public class AssessmentRun { /** * A list of notifications for the event subscriptions . A notification about a particular generated finding is added
* to this list only once .
* @ param notifications
* A list of notifications for the event subscriptions . A notification about a particular generated finding
* is added to this list only once . */
public void setNotifications ( java . util . Collection < AssessmentRunNotification > notifications ) { } } | if ( notifications == null ) { this . notifications = null ; return ; } this . notifications = new java . util . ArrayList < AssessmentRunNotification > ( notifications ) ; |
public class DelayedGitServiceInitializer { /** * Sets the common reference and releases any threads waiting for the reference to be set .
* @ param git */
public void setGitService ( GitService git ) { } } | if ( git != null ) { logger . debug ( "Setting git service" ) ; this . git = git ; latch . countDown ( ) ; } |
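A sketch of the consumer side of this latch pattern ; the getter name is an assumption , only setGitService above is from the source :

    public GitService getGitService() throws InterruptedException {
        // Blocks until setGitService() counts the latch down;
        // returns immediately on every later call.
        latch.await();
        return git;
    }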
public class NotEmptyIfOtherIsNotEmptyValidator { /** * { @ inheritDoc } Initialize the validator .
* @ see javax . validation . ConstraintValidator # initialize ( java . lang . annotation . Annotation ) */
@ Override public final void initialize ( final NotEmptyIfOtherIsNotEmpty pconstraintAnnotation ) { } } | message = pconstraintAnnotation . message ( ) ; fieldCheckName = pconstraintAnnotation . field ( ) ; fieldCompareName = pconstraintAnnotation . fieldCompare ( ) ; |
public class MultiScopeRecoveryLog { /** * Instructs the recovery log to perform a keypoint operation . Any redundant
* information will be removed and all cached information will be forced to disk .
* @ exception LogClosedException Thrown if the log is closed .
* @ exception InternalLogException Thrown if an unexpected error has occurred .
* @ exception LogIncompatibleException An attempt has been made to access a recovery
* log that is not compatible with this version
* of the service . */
@ Override public void keypoint ( ) throws LogClosedException , InternalLogException , LogIncompatibleException { } } | if ( tc . isEntryEnabled ( ) ) Tr . entry ( tc , "keypoint" , this ) ; // If this recovery log instance has been marked as incompatible then throw an exception
// accordingly .
if ( incompatible ( ) ) { if ( tc . isEntryEnabled ( ) ) Tr . exit ( tc , "keypoint" , "LogIncompatibleException" ) ; throw new LogIncompatibleException ( ) ; } // If this recovery log instance has experienced a serious internal error then prevent this operation from
// executing .
if ( failed ( ) ) { if ( tc . isEntryEnabled ( ) ) Tr . exit ( tc , "keypoint" , this ) ; throw new InternalLogException ( null ) ; } // Check that the log is open .
if ( _logHandle == null ) { if ( tc . isEntryEnabled ( ) ) Tr . exit ( tc , "keypoint" , "LogClosedException" ) ; throw new LogClosedException ( null ) ; } // Try and obtain the exclusive lock that must be held to perform keypoint processing . If this
// lock is denied then another thread is performing the required keypoint operation and so
// this method can return after checking that the other thread successfully keypointed .
boolean obtainedLock = false ; try { obtainedLock = _controlLock . attemptExclusiveLock ( ) ; } catch ( HoldingExclusiveLockException exc ) { // This thread already holds the exclusive lock . That means that the keypoint operation is already
// in progress on this thread . This would occur if the keypoint operation actually triggered a
// keypoint operation . This should never occur in practice and would represent a serious internal
// error .
FFDCFilter . processException ( exc , "com.ibm.ws.recoverylog.spi.MultiScopeRecoveryLog.keypoint" , "1353" , this ) ; if ( tc . isDebugEnabled ( ) ) Tr . debug ( tc , "The keypoint operation has triggered a keypoint operation." ) ; markFailed ( exc ) ; /* @ MD19484C */
if ( tc . isEntryEnabled ( ) ) Tr . exit ( tc , "keypoint" , "InternalLogException" ) ; throw new InternalLogException ( exc ) ; } if ( obtainedLock ) { // This thread has been allocated the exclusive lock . That means that we will perform a ' real ' keypoint
// operation on this thread .
// Inform the underlying log handle that we are about to process a keypoint operation . It can
// use this opportunity to switch files and re - calculate the amount of available free space .
try { _logHandle . keypointStarting ( ) ; } catch ( InternalLogException exc ) { FFDCFilter . processException ( exc , "com.ibm.ws.recoverylog.spi.MultiScopeRecoveryLog.keypoint" , "1373" , this ) ; markFailed ( exc ) ; /* @ MD19484C */
try { _controlLock . releaseExclusiveLock ( ) ; } catch ( Throwable exc2 ) { // IGNORE - The recovery log service is failing anyway .
FFDCFilter . processException ( exc2 , "com.ibm.ws.recoverylog.spi.MultiScopeRecoveryLog.keypoint" , "1384" , this ) ; } if ( tc . isEntryEnabled ( ) ) Tr . exit ( tc , "keypoint" , exc ) ; throw exc ; } catch ( Throwable exc ) { FFDCFilter . processException ( exc , "com.ibm.ws.recoverylog.spi.MultiScopeRecoveryLog.keypoint" , "1392" , this ) ; markFailed ( exc ) ; /* @ MD19484C */
try { _controlLock . releaseExclusiveLock ( ) ; } catch ( Throwable exc2 ) { // IGNORE - The recovery log service is failing anyway .
FFDCFilter . processException ( exc2 , "com.ibm.ws.recoverylog.spi.MultiScopeRecoveryLog.keypoint" , "1403" , this ) ; } if ( tc . isEntryEnabled ( ) ) Tr . exit ( tc , "keypoint" , "InternalLogException" ) ; throw new InternalLogException ( exc ) ; } // Check that there is sufficient space available in the log
// to perform this keypoint operation .
if ( tc . isDebugEnabled ( ) ) { int targetFreeSpace = _logHandle . getFreeSpace ( ) ; Tr . debug ( tc , "Recovery log contains " + _totalDataSize + " payload bytes" ) ; Tr . debug ( tc , "Target keypoint file has " + targetFreeSpace + " available free bytes" ) ; Tr . debug ( tc , "Resize trigger constant is " + TOTAL_DATA_RESIZE_TRIGGER ) ; Tr . debug ( tc , "Resize trigger value is " + targetFreeSpace * TOTAL_DATA_RESIZE_TRIGGER + " bytes" ) ; } if ( _totalDataSize > ( _logHandle . getFreeSpace ( ) * TOTAL_DATA_RESIZE_TRIGGER ) ) { // There is insufficient space in the underlying file to write all of the log ' s data
// while maintaining the required amount of free space in the log . We must , if possible ,
// resize it .
// Determine the target for the log ' s size , capping it to the maximum size specified by the user .
final int logFileHeaderSize ; /* @ MD19753A */
try /* @ MD19753A */
{ /* @ MD19753A */
logFileHeaderSize = _logHandle . logFileHeader ( ) . length ( ) ; /* @ MD19753A */
} /* @ MD19753A */
catch ( InternalLogException ile ) /* @ MD19753A */
{ /* 3 @ MD19753A */
if ( tc . isEventEnabled ( ) ) Tr . debug ( tc , "Could not get log file header length" , ile ) ; FFDCFilter . processException ( ile , "com.ibm.ws.recoverylog.spi.RecoveryLogImpl.keypoint" , "1780" , this ) ; markFailed ( ile ) ; /* @ MD19753A */
try /* @ MD19753A */
{ /* @ MD19753A */
_controlLock . releaseExclusiveLock ( ) ; /* @ MD19753A */
} /* @ MD19753A */
catch ( Throwable exc ) /* @ MD19753A */
{ /* @ MD19753A */
// IGNORE - The recovery log service is failing anyway . 2 @ MD19753A
FFDCFilter . processException ( exc , "com.ibm.ws.recoverylog.spi.RecoveryLogImpl.keypoint" , "1791" , this ) ; } /* @ MD19753A */
if ( tc . isEntryEnabled ( ) ) Tr . exit ( tc , "keypoint" ) ; /* @ MD19753A */
throw ile ; /* @ MD19753A */
} /* @ MD19753A */
catch ( LogIncompatibleException lie ) { FFDCFilter . processException ( lie , "com.ibm.ws.recoverylog.spi.RecoveryLogImpl.keypoint" , "1575" , this ) ; // Unlike some instances of LogIncompatibleException that occur when initially opening a recovery
// log , this instance is unlikely to occur unless there is a bug in the code . We check the
// version code when we initially open the log and if its not compatible we clear the state and
// stop responding . To get as far as a keypoint before this is detected should not occur . Thats
// why we convert this exception into an InternalLogException .
markFailed ( lie ) ; try { _controlLock . releaseExclusiveLock ( ) ; } catch ( Throwable exc ) { // No FFDC code needed
// IGNORE - The recovery log service is failing anyway .
} if ( tc . isEntryEnabled ( ) ) Tr . exit ( tc , "keypoint" ) ; throw new InternalLogException ( lie ) ; } final int targetSize = Math . min ( ( int ) ( _totalDataSize * TOTAL_DATA_RESIZE_MULTIPLIER ) , ( ( _maxLogFileSize * 1024 ) - logFileHeaderSize ) ) ; /* @ MD19753C */
if ( targetSize < _totalDataSize ) { // The log cannot be resized to accommodate all of its data . Mark it as failed to prevent
// further I / O occurring and throw the LogFullException back to the caller . Note that we must
// mark it as failed BEFORE releasing the exclusive lock to ensure that any other threads
// that are waiting on the lock will be able to detect the failure when they wake up .
LogFullException lfe = new LogFullException ( null ) ; /* @ MD19484M */
markFailed ( lfe ) ; /* @ MD19484C */
try { _controlLock . releaseExclusiveLock ( ) ; } catch ( Throwable exc2 ) { // IGNORE - The recovery log service is failing anyway .
FFDCFilter . processException ( exc2 , "com.ibm.ws.recoverylog.spi.MultiScopeRecoveryLog.keypoint" , "1446" , this ) ; } if ( tc . isEntryEnabled ( ) ) Tr . exit ( tc , "keypoint" , "LogFullException" ) ; throw lfe ; /* @ MD19484C */
} // Resize the log to the target size
_logHandle . resizeLog ( targetSize ) ; } try { final Iterator recoverableUnits = _recoverableUnits . values ( ) . iterator ( ) ; while ( recoverableUnits . hasNext ( ) ) { final RecoverableUnitImpl recoverableUnit = ( RecoverableUnitImpl ) recoverableUnits . next ( ) ; recoverableUnit . writeSections ( true ) ; } _logHandle . keypoint ( ) ; } catch ( Throwable exc ) { // First try and release the locks . Since the recovery log has suffered a fatal error condition
// we ignore any failures that occur here .
FFDCFilter . processException ( exc , "com.ibm.ws.recoverylog.spi.MultiScopeRecoveryLog.keypoint" , "1478" , this ) ; if ( tc . isEventEnabled ( ) ) Tr . event ( tc , "Exception caught performing keypoint" , exc ) ; markFailed ( exc ) ; try { _controlLock . releaseExclusiveLock ( ) ; } catch ( Throwable exc2 ) { // IGNORE - The recovery log service is failing anyway .
FFDCFilter . processException ( exc2 , "com.ibm.ws.recoverylog.spi.MultiScopeRecoveryLog.keypoint" , "1491" , this ) ; } if ( tc . isEntryEnabled ( ) ) Tr . exit ( tc , "keypoint" , "InternalLogException" ) ; throw new InternalLogException ( exc ) ; } // issue warning message if log is filling up
if ( ! _logWarningIssued && _totalDataSize > ( ( _maxLogFileSize * 1024 - _totalDataSize ) * LOG_WARNING_FACTOR ) ) { if ( tc . isEventEnabled ( ) ) Tr . event ( tc , "Logfile is filling up, issuing warning." , _logName ) ; _logWarningIssued = true ; try { _recoveryAgent . logFileWarning ( _logName , _totalDataSize , _maxLogFileSize * 1024 ) ; } catch ( Throwable t ) { // shouldn ' t happen , swallow to ensure lock released
FFDCFilter . processException ( t , "com.ibm.ws.recoverylog.spi.MultiScopeRecoveryLog.keypoint" , "1511" , this ) ; } } try { _controlLock . releaseExclusiveLock ( ) ; } catch ( NoExclusiveLockException exc2 ) { // This should not occur as we did get the exclusive lock at the top of the method . If this
// does occur all we can do is throw an InternalLogException exception .
FFDCFilter . processException ( exc2 , "com.ibm.ws.recoverylog.spi.MultiScopeRecoveryLog.keypoint" , "1506" , this ) ; if ( tc . isEntryEnabled ( ) ) Tr . exit ( tc , "keypoint" , "InternalLogException" ) ; throw new InternalLogException ( exc2 ) ; } catch ( Throwable exc ) { FFDCFilter . processException ( exc , "com.ibm.ws.recoverylog.spi.MultiScopeRecoveryLog.keypoint" , "1512" , this ) ; if ( tc . isEntryEnabled ( ) ) Tr . exit ( tc , "keypoint" , "InternalLogException" ) ; throw new InternalLogException ( exc ) ; } } else { // This thread has not been allocated the exclusive lock . This has occurred because some other thread
// was allocated the exclusive lock , performed a ' real ' keypoint and then released the exclusive lock
// again . This thread has been blocked inside the ' attemptExclusiveLock ' call whilst this has
// been going on and has performed a ' piggyback ' keypoint - ie all the real work has already
// been done for this thread by the ' real ' keypoint , so we just exit the method as if we had done
// the keypoint directly .
// Check that no serious internal error occurred during the real keypoint operation . If it did then
// this " piggybacked " keypoint has logically also failed . This must be reported to
// the caller just as if it was this thread that encountered the problem .
if ( failed ( ) ) { if ( tc . isEntryEnabled ( ) ) Tr . exit ( tc , "keypoint" , this ) ; throw new InternalLogException ( null ) ; } } if ( tc . isEntryEnabled ( ) ) Tr . exit ( tc , "keypoint" ) ; |
public class Payout { /** * Retrieves the details of an existing payout . Supply the unique payout ID from either a payout
* creation request or the payout list , and Stripe will return the corresponding payout
* information . */
public static Payout retrieve ( String payout ) throws StripeException { } } | return retrieve ( payout , ( Map < String , Object > ) null , ( RequestOptions ) null ) ; |
public class GroupByDOMOperator { /** * Takes the leaf areas and tries to join the homogeneous paragraphs . */
private void groupByDOM ( AreaImpl root ) { } } | if ( root . getChildCount ( ) > 1 ) findSuperAreas ( root , 1 ) ; for ( int i = 0 ; i < root . getChildCount ( ) ; i ++ ) groupByDOM ( ( AreaImpl ) root . getChildAt ( i ) ) ; |
public class JsonRpcMultiServer { /** * Get the handler ( object ) that should be invoked to execute the specified
* RPC method based on the specified service name .
* @ param serviceName the service name
* @ return the handler to invoke the RPC call against */
@ Override protected Object getHandler ( String serviceName ) { } } | Object handler = handlerMap . get ( serviceName ) ; if ( handler == null ) { logger . error ( "Service '{}' is not registered in this multi-server" , serviceName ) ; throw new RuntimeException ( "Service '" + serviceName + "' does not exist" ) ; } return handler ; |
public class Profile { /** * Returns the config for { @ code type } , or null if it is not configured . */
private @ Nullable TypeConfigElement typeConfig ( ProtoType type ) { } } | for ( ProfileFileElement element : profileFiles ) { for ( TypeConfigElement typeConfig : element . getTypeConfigs ( ) ) { if ( typeConfig . getType ( ) . equals ( type . toString ( ) ) ) return typeConfig ; } } return null ; |
public class MediaType { /** * Returns { @ code true } if this { @ link MediaType } belongs to the given { @ link MediaType } .
* Similar to what { @ link MediaType # is ( MediaType ) } does except that this one compares the parameters
* case - insensitively and excludes the ' q ' parameter . */
public boolean belongsTo ( MediaType mediaTypeRange ) { } } | return ( mediaTypeRange . type . equals ( WILDCARD ) || mediaTypeRange . type . equals ( type ) ) && ( mediaTypeRange . subtype . equals ( WILDCARD ) || mediaTypeRange . subtype . equals ( subtype ) ) && containsAllParameters ( mediaTypeRange . parameters ( ) , parameters ( ) ) ; |
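A usage sketch ; the parse factory is an assumption , while the wildcard and parameter handling follow the implementation above :

    MediaType json = MediaType.parse("application/json; charset=utf-8; q=0.8");
    MediaType range = MediaType.parse("application/*; charset=UTF-8");
    // The wildcard subtype matches, "charset" compares case-insensitively,
    // and the 'q' parameter is excluded from the comparison.
    boolean matches = json.belongsTo(range); // true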
public class FundamentalLinear8 { /** * Computes a fundamental or essential matrix from a set of associated point correspondences .
* @ param points List of corresponding image coordinates . In pixel for fundamental matrix or
* normalized coordinates for essential matrix .
* @ return true If successful or false if it failed */
public boolean process ( List < AssociatedPair > points , DMatrixRMaj solution ) { } } | if ( points . size ( ) < 8 ) throw new IllegalArgumentException ( "Must be at least 8 points. Was only " + points . size ( ) ) ; // use normalized coordinates for pixel and calibrated
// TODO re - evaluate decision to normalize for calibrated case
LowLevelMultiViewOps . computeNormalization ( points , N1 , N2 ) ; createA ( points , A ) ; if ( process ( A , solution ) ) return false ; // undo normalization on F
PerspectiveOps . multTranA ( N2 . matrix ( ) , solution , N1 . matrix ( ) , solution ) ; if ( computeFundamental ) return projectOntoFundamentalSpace ( solution ) ; else return projectOntoEssential ( solution ) ; |
public class Option { /** * If a value is present , apply the provided { @ code Option } - bearing
* mapping function to it , return that result , otherwise return
* { @ link # NONE } . This method is similar to { @ link # map ( Function ) } ,
* but the provided mapper is one whose result is already an
* { @ code Option } , and if invoked , { @ code flatMap } does not wrap it
* with an additional { @ code Option } .
* @ param < B >
* The type parameter to the { @ code Option } returned by the mapping function
* @ param mapper
* a mapping function to apply to the value , if present
* @ return the result of applying an { @ code Option } - bearing mapping
* function to the value of this { @ code Option } , if a value
* is present , otherwise { @ link # NONE }
* @ throws NullPointerException
* if the mapping function is { @ code null }
* or returns a { @ code null } result
* @ since 0.2 */
@ SuppressWarnings ( "unchecked" ) public final < B > Option < B > flatMap ( final Function < ? super T , Option < B > > mapper ) { } } | E . NPE ( mapper ) ; Option < B > result = isDefined ( ) ? mapper . apply ( get ( ) ) : NONE ; E . NPE ( null == result ) ; return result ; |
public class UnicodeSet { /** * Returns the number of elements in this set ( its cardinality ) .
* Note that the elements of a set may include both individual
* codepoints and strings .
* @ return the number of elements in this set ( its cardinality ) . */
public int size ( ) { } } | int n = 0 ; int count = getRangeCount ( ) ; for ( int i = 0 ; i < count ; ++ i ) { n += getRangeEnd ( i ) - getRangeStart ( i ) + 1 ; } return n + strings . size ( ) ; |
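A worked example of the cardinality computation ( the pattern constructor and add ( String ) are standard ICU API ) :

    UnicodeSet set = new UnicodeSet("[a-cx]"); // ranges a..c and x..x
    set.add("ch");                             // one multi-character string
    // size() sums (end - start + 1) over the ranges, plus the string count:
    int n = set.size();                        // 3 + 1 + 1 = 5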
public class DWOF { /** * This method prepares a container for the radii of the objects and
* initializes radii according to the equation :
* initialRadii of a certain object = ( absoluteMinDist of all objects ) *
* ( avgDist of the object ) / ( minAvgDist of all objects )
* @ param ids Database IDs to process
* @ param distFunc Distance function
* @ param knnq kNN search function
* @ param radii WritableDoubleDataStore to store radii */
private void initializeRadii ( DBIDs ids , KNNQuery < O > knnq , DistanceQuery < O > distFunc , WritableDoubleDataStore radii ) { } } | FiniteProgress avgDistProgress = LOG . isVerbose ( ) ? new FiniteProgress ( "Calculating average kNN distances-" , ids . size ( ) , LOG ) : null ; double absoluteMinDist = Double . POSITIVE_INFINITY ; double minAvgDist = Double . POSITIVE_INFINITY ; // to get the mean for each object
Mean mean = new Mean ( ) ; // Iterate over all objects
for ( DBIDIter iter = ids . iter ( ) ; iter . valid ( ) ; iter . advance ( ) ) { KNNList iterNeighbors = knnq . getKNNForDBID ( iter , k ) ; // skip the point itself
mean . reset ( ) ; for ( DBIDIter neighbor1 = iterNeighbors . iter ( ) ; neighbor1 . valid ( ) ; neighbor1 . advance ( ) ) { if ( DBIDUtil . equal ( neighbor1 , iter ) ) { continue ; } for ( DBIDIter neighbor2 = iterNeighbors . iter ( ) ; neighbor2 . valid ( ) ; neighbor2 . advance ( ) ) { if ( DBIDUtil . equal ( neighbor1 , neighbor2 ) || DBIDUtil . equal ( neighbor2 , iter ) ) { continue ; } double distance = distFunc . distance ( neighbor1 , neighbor2 ) ; mean . put ( distance ) ; if ( distance > 0. && distance < absoluteMinDist ) { absoluteMinDist = distance ; } } } double currentMean = mean . getMean ( ) ; radii . putDouble ( iter , currentMean ) ; if ( currentMean < minAvgDist ) { minAvgDist = currentMean ; } LOG . incrementProcessed ( avgDistProgress ) ; } LOG . ensureCompleted ( avgDistProgress ) ; // Initializing the radii of all objects .
for ( DBIDIter iter = ids . iter ( ) ; iter . valid ( ) ; iter . advance ( ) ) { radii . putDouble ( iter , ( minAvgDist > 0 ) ? ( absoluteMinDist * radii . doubleValue ( iter ) / minAvgDist ) : Double . POSITIVE_INFINITY ) ; } |
public class DatabaseError { /** * { @ inheritDoc } */
@ Override public String getUserFacingMessage ( ) { } } | final StringBuilder bldr = new StringBuilder ( ) ; bldr . append ( getMessage ( ) ) ; bldr . append ( " for " ) ; bldr . append ( getName ( ) ) ; bldr . append ( "\n\treason: " ) ; final SQLException cause = ( SQLException ) getCause ( ) ; final int code = cause . getErrorCode ( ) ; final String msg = cause . getMessage ( ) ; bldr . append ( "(" ) ; bldr . append ( code ) ; bldr . append ( ") " ) ; bldr . append ( msg ) ; return bldr . toString ( ) ; |
public class DomainService { /** * Gets a Connector by id .
* @ param evse evse which should contain the connector .
* @ param id connector id .
* @ param exceptionIfNotFound throw EntityNotFoundException if connector cannot be found .
* @ return connector or null if it cannot be found and exceptionIfNotFound is false .
* @ throws EntityNotFoundException if exceptionIfNotFound is true and the Connector cannot be found . */
private Connector getConnectorById ( Evse evse , Long id , boolean exceptionIfNotFound ) { } } | for ( Connector connector : evse . getConnectors ( ) ) { if ( id . equals ( connector . getId ( ) ) ) { return connector ; } } if ( exceptionIfNotFound ) { throw new EntityNotFoundException ( String . format ( "Unable to find connector with id '%s'" , id ) ) ; } else { return null ; } |
public class SDBaseOps { /** * Get a subset of the specified input , by specifying the first element , last element , and the strides . < br >
* For example , if input is : < br >
* [ a , b , c ] < br >
* [ d , e , f ] < br >
* [ g , h , i ] < br >
* then stridedSlice ( input , begin = [ 0,1 ] , end = [ 2,2 ] , strides = [ 2,1 ] ) will return : < br >
* [ b , c ] < br >
* [ h , i ] < br >
* < br >
* @ param name Output variable name
* @ param input Variable to get subset of
* @ param begin Beginning index . Must be same length as rank of input array
* @ param end End index . Must be same length as the rank of the array
* @ param strides Stride ( " step size " ) for each dimension . Must be same length as the rank of the array . For example ,
* stride of 2 means take every second element .
* @ return Subset of the input */
public SDVariable stridedSlice ( String name , SDVariable input , long [ ] begin , long [ ] end , long [ ] strides ) { } } | return stridedSlice ( name , input , begin , end , strides , 0 , 0 , 0 , 0 , 0 ) ; |
public class OverviewMap { /** * Set a new style for the rectangle that shows the current position on the target map .
* @ param rectangleStyle
* rectangle style
* @ since 1.8.0 */
@ Api public void setRectangleStyle ( ShapeStyle rectangleStyle ) { } } | this . rectangleStyle = rectangleStyle ; if ( targetRectangle != null ) { targetRectangle . setStyle ( rectangleStyle ) ; render ( targetRectangle , RenderGroup . SCREEN , RenderStatus . ALL ) ; } |
public class ValueBinder { /** * Binds to { @ code Value . timestampArray ( values ) } */
public R toTimestampArray ( @ Nullable Iterable < Timestamp > values ) { } } | return handle ( Value . timestampArray ( values ) ) ; |
public class ModelUtils { /** * Resolves a setter method for a field .
* @ param field The field
* @ return An optional setter method */
Optional < ExecutableElement > findSetterMethodFor ( Element field ) { } } | String name = field . getSimpleName ( ) . toString ( ) ; if ( field . asType ( ) . getKind ( ) == TypeKind . BOOLEAN ) { if ( name . length ( ) > 2 && Character . isUpperCase ( name . charAt ( 2 ) ) ) { name = name . replaceFirst ( "^(is)(.+)" , "$2" ) ; } } String setterName = setterNameFor ( name ) ; // FIXME refine this to discover one of possible overloaded methods with correct signature ( i . e . single arg of field type )
TypeElement typeElement = classElementFor ( field ) ; if ( typeElement == null ) { return Optional . empty ( ) ; } List < ? extends Element > elements = typeElement . getEnclosedElements ( ) ; List < ExecutableElement > methods = ElementFilter . methodsIn ( elements ) ; return methods . stream ( ) . filter ( method -> { String methodName = method . getSimpleName ( ) . toString ( ) ; if ( setterName . equals ( methodName ) ) { Set < Modifier > modifiers = method . getModifiers ( ) ; return // it ' s not static
! modifiers . contains ( STATIC ) // it ' s either public or package visibility
&& modifiers . contains ( PUBLIC ) || ! ( modifiers . contains ( PRIVATE ) || modifiers . contains ( PROTECTED ) ) ; } return false ; } ) . findFirst ( ) ; |
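An illustration of the boolean renaming rule above , using a hypothetical bean : for a primitive boolean field named with an "is" prefix followed by an uppercase letter , the prefix is stripped before deriving the setter name .

    public class Flag {
        private boolean isEnabled;
        // Resolved as "isEnabled" -> "Enabled" -> setEnabled(boolean),
        // not setIsEnabled(boolean).
        public void setEnabled(boolean enabled) { this.isEnabled = enabled; }
    }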
public class UniqueId { /** * Appends the given UID to the given string buffer , followed by " \ \ E " .
* @ param buf The buffer to append
* @ param id The UID to add as a binary regex pattern
* @ since 2.1 */
public static void addIdToRegexp ( final StringBuilder buf , final byte [ ] id ) { } } | boolean backslash = false ; for ( final byte b : id ) { buf . append ( ( char ) ( b & 0xFF ) ) ; if ( b == 'E' && backslash ) { // If we saw a ` \ ' and now we have a ` E ' .
// So we just terminated the quoted section because we just added \ E
// to ` buf ' . So let ' s put a literal \ E now and start quoting again .
buf . append ( "\\\\E\\Q" ) ; } else { backslash = b == '\\' ; } } buf . append ( "\\E" ) ; |
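A sketch of how the quoting interacts with the caller ; opening the quoted section with "\\Q" is the caller's responsibility , and the byte values are invented :

    StringBuilder buf = new StringBuilder("^\\Q");
    // Appends the ID bytes literally; if the bytes ever contain "\\E", the
    // quoted section is closed and immediately reopened so the regex engine
    // never sees an unintended end-of-quote.
    UniqueId.addIdToRegexp(buf, new byte[] { 0, 42, 'E' });
    // buf now matches keys beginning with those three literal ID bytes.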
public class PartialResponseWriter { /** * < p class = " changed _ added _ 2_0 " > Write the start of an eval operation . < / p >
* @ throws IOException if an input / output error occurs
* @ since 2.0 */
public void startEval ( ) throws IOException { } } | startChangesIfNecessary ( ) ; ResponseWriter writer = getWrapped ( ) ; writer . startElement ( "eval" , null ) ; writer . startCDATA ( ) ; |
public class MetricUtils { /** * Concatenate { @ code size } many values from the passed in arrays , starting at offset { @ code start } .
* @ param arrays arrays to concatenate
* @ param start starting offset position
* @ param size number of elements
* @ return concatenated array */
public static long [ ] concatArrays ( List < long [ ] > arrays , int start , int size ) { } } | long [ ] result = new long [ size ] ; // How many values we still need to move over
int howManyLeft = size ; // Where in the resulting array we ' re currently bulk - writing
int targetPosition = 0 ; // Where we ' re copying * from * , in ( one of ) the source array .
// Typically 0 , except maybe for the first array in the list
int from = start ; for ( int i = 0 ; i < arrays . size ( ) && howManyLeft > 0 ; i ++ ) { long [ ] current = arrays . get ( i ) ; // Can ' t copy more than what remains in the current source array , or more than we still need overall
int howManyThisRound = Math . min ( current . length - from , howManyLeft ) ; System . arraycopy ( current , from , result , targetPosition , howManyThisRound ) ; from = 0 ; howManyLeft -= howManyThisRound ; targetPosition += howManyThisRound ; } // If this is non - zero here , it means we were asked to copy more than we were provided
if ( howManyLeft > 0 ) { throw new ArrayIndexOutOfBoundsException ( String . format ( "Not enough data, short of %d elements" , howManyLeft ) ) ; } return result ; |
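A worked example tracing the offset handling ; the values are invented :

    java.util.List<long[]> arrays = java.util.Arrays.asList(
            new long[] {1, 2, 3}, new long[] {4, 5});
    // start=1 skips element 0 of the first array only; copying then resumes
    // at index 0 of each later array until 'size' elements have been moved.
    long[] out = MetricUtils.concatArrays(arrays, 1, 4); // {2, 3, 4, 5}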
public class CommercePriceListAccountRelUtil { /** * Removes the commerce price list account rel with the primary key from the database . Also notifies the appropriate model listeners .
* @ param commercePriceListAccountRelId the primary key of the commerce price list account rel
* @ return the commerce price list account rel that was removed
* @ throws NoSuchPriceListAccountRelException if a commerce price list account rel with the primary key could not be found */
public static CommercePriceListAccountRel remove ( long commercePriceListAccountRelId ) throws com . liferay . commerce . price . list . exception . NoSuchPriceListAccountRelException { } } | return getPersistence ( ) . remove ( commercePriceListAccountRelId ) ; |
public class DatePickerDialog { /** * Set the range of selectable dates .
* @ param minDay The day value of minimum date .
* @ param minMonth The month value of minimum date .
* @ param minYear The year value of minimum date .
* @ param maxDay The day value of maximum date .
* @ param maxMonth The month value of maximum date .
* @ param maxYear The year value of maximum date .
* @ return The DatePickerDialog for chaining methods . */
public DatePickerDialog dateRange ( int minDay , int minMonth , int minYear , int maxDay , int maxMonth , int maxYear ) { } } | mDatePickerLayout . setDateRange ( minDay , minMonth , minYear , maxDay , maxMonth , maxYear ) ; return this ; |
public class CompressedRamStorage { /** * Stores the object into storage if it doesn ' t already exist
* @ param key
* @ param object
* @ return Returns TRUE if store operation was applied , FALSE otherwise */
@ Override public boolean storeIfAbsent ( T key , INDArray object ) { } } | try { if ( emulateIsAbsent ) lock . writeLock ( ) . lock ( ) ; if ( compressedEntries . containsKey ( key ) ) { return false ; } else { store ( key , object ) ; return true ; } } finally { if ( emulateIsAbsent ) lock . writeLock ( ) . unlock ( ) ; } |
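A usage sketch of the check-then-act contract ; the storage construction and key values are elided :

    boolean stored = storage.storeIfAbsent("embedding:42", array); // true, stored
    boolean again  = storage.storeIfAbsent("embedding:42", other); // false, kept
    // With emulateIsAbsent enabled, containsKey and store run under one write
    // lock, so two racing writers cannot both observe the key as absent.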
public class User { /** * Returns the API keys for this user , looked up from the parent account ' s keys or those of one of its children . */
public List < ApiKey > apiKeys ( ) throws EasyPostException { } } | ApiKeys parentKeys = ApiKeys . all ( ) ; if ( this . getId ( ) == parentKeys . getId ( ) ) { return parentKeys . getKeys ( ) ; } for ( int i = 0 ; i < parentKeys . children . size ( ) ; i ++ ) { if ( this . getId ( ) . equals ( parentKeys . children . get ( i ) . getId ( ) ) ) { return parentKeys . children . get ( i ) . getKeys ( ) ; } } throw new EasyPostException ( "Unable to find api key. Please contact support@easypost.com" ) ; |
public class MsBuildParser { /** * Returns whether the warning type is of the specified type .
* @ param matcher
* the matcher
* @ param type
* the type to match with
* @ return < code > true < / code > if the warning type is of the specified type */
private boolean isOfType ( final Matcher matcher , final String type ) { } } | return StringUtils . containsIgnoreCase ( matcher . group ( 4 ) , type ) ; |