| idx (int64, 0–41.2k) | question (string, lengths 73–5.81k) | target (string, lengths 5–918) |
|---|---|---|
1,200
|
public static < S , I , O > int computeEffectiveResets ( final ADTNode < S , I , O > adt ) { return computeEffectiveResetsInternal ( adt , 0 ) ; }
|
Computes how often reset nodes are encountered when traversing from the given node to the leaves of its induced subtree.
|
1,201
|
public static < S , I , O > ADTNode < S , I , O > buildADSFromObservation ( final Word < I > input , final Word < O > output , final S finalState ) { if ( input . size ( ) != output . size ( ) ) { throw new IllegalArgumentException ( "Arguments differ in length" ) ; } final Iterator < I > inputIterator = input . iterator ( ) ; final Iterator < O > outputIterator = output . iterator ( ) ; final ADTNode < S , I , O > result = new ADTSymbolNode < > ( null , inputIterator . next ( ) ) ; ADTNode < S , I , O > nodeIter = result ; while ( inputIterator . hasNext ( ) ) { final ADTNode < S , I , O > nextNode = new ADTSymbolNode < > ( nodeIter , inputIterator . next ( ) ) ; nodeIter . getChildren ( ) . put ( outputIterator . next ( ) , nextNode ) ; nodeIter = nextNode ; } final ADTNode < S , I , O > finalNode = new ADTLeafNode < > ( nodeIter , finalState ) ; nodeIter . getChildren ( ) . put ( outputIterator . next ( ) , finalNode ) ; return result ; }
|
Build a single-trace ADS from the given information.
|
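Since the row above shows the construction only abstractly, a standalone sketch may help: it zips an input word with its matching output word into the same linear chain-plus-leaf shape. All names below are illustrative stand-ins, not LearnLib's actual API.

```java
import java.util.ArrayList;
import java.util.List;

// Standalone analogue of the single-trace ADS construction above: pair
// each input symbol with its output symbol to form a linear chain that
// ends in a leaf. Illustrative names only, not LearnLib's API.
public class SingleTraceSketch {
    record Edge(char input, char output) { }

    static List<Edge> buildChain(String input, String output) {
        if (input.length() != output.length()) {
            throw new IllegalArgumentException("Arguments differ in length");
        }
        List<Edge> chain = new ArrayList<>();
        for (int i = 0; i < input.length(); i++) {
            chain.add(new Edge(input.charAt(i), output.charAt(i)));
        }
        return chain;
    }

    public static void main(String[] args) {
        // a/0 -> b/1 -> c/0 -> leaf(finalState)
        System.out.println(buildChain("abc", "010"));
    }
}
```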
1,202
|
public List < S > bfsStates ( ) { List < S > stateList = new ArrayList < > ( ) ; Set < S > visited = new HashSet < > ( ) ; int ptr = 0 ; stateList . add ( root ) ; visited . add ( root ) ; int numStates = 1 ; while ( ptr < numStates ) { S curr = stateList . get ( ptr ++ ) ; for ( int i = 0 ; i < alphabetSize ; i ++ ) { S succ = curr . getSuccessor ( i ) ; if ( succ != null && visited . add ( succ ) ) { stateList . add ( succ ) ; numStates ++ ; } } } return stateList ; }
|
Retrieves a list of all states in this PTA that are reachable from the root state. The states will be returned in breadth-first order.
|
1,203
|
public Iterator < S > bfsIterator ( ) { Set < S > visited = new HashSet < > ( ) ; final Deque < S > bfsQueue = new ArrayDeque < > ( ) ; bfsQueue . add ( root ) ; visited . add ( root ) ; return new AbstractIterator < S > ( ) { protected S computeNext ( ) { S next = bfsQueue . poll ( ) ; if ( next == null ) { return endOfData ( ) ; } for ( int i = 0 ; i < alphabetSize ; i ++ ) { S child = next . getSuccessor ( i ) ; if ( child != null && visited . add ( child ) ) { bfsQueue . offer ( child ) ; } } return next ; } } ; }
|
Retrieves an iterator that can be used for iterating over all states in this PTA that are reachable from the root state, in breadth-first order.
|
1,204
|
protected List < List < Row < I > > > incorporateCounterExample ( DefaultQuery < I , D > ce ) { return ObservationTableCEXHandlers . handleClassicLStar ( ce , table , oracle ) ; }
|
Incorporates the information provided by a counterexample into the observation data structure.
|
1,205
|
protected boolean completeConsistentTable ( List < List < Row < I > > > unclosed , boolean checkConsistency ) { boolean refined = false ; List < List < Row < I > > > unclosedIter = unclosed ; do { while ( ! unclosedIter . isEmpty ( ) ) { List < Row < I > > closingRows = selectClosingRows ( unclosedIter ) ; unclosedIter = table . toShortPrefixes ( closingRows , oracle ) ; refined = true ; } if ( checkConsistency ) { Inconsistency < I > incons ; do { incons = table . findInconsistency ( ) ; if ( incons != null ) { Word < I > newSuffix = analyzeInconsistency ( incons ) ; unclosedIter = table . addSuffix ( newSuffix , oracle ) ; } } while ( unclosedIter . isEmpty ( ) && ( incons != null ) ) ; } } while ( ! unclosedIter . isEmpty ( ) ) ; return refined ; }
|
Iteratively checks for unclosedness and inconsistencies in the table and fixes any occurrences thereof. This process is repeated until the observation table is both closed and consistent.
|
1,206
|
protected Word < I > analyzeInconsistency ( Inconsistency < I > incons ) { int inputIdx = alphabet . getSymbolIndex ( incons . getSymbol ( ) ) ; Row < I > succRow1 = incons . getFirstRow ( ) . getSuccessor ( inputIdx ) ; Row < I > succRow2 = incons . getSecondRow ( ) . getSuccessor ( inputIdx ) ; int numSuffixes = table . getSuffixes ( ) . size ( ) ; for ( int i = 0 ; i < numSuffixes ; i ++ ) { D val1 = table . cellContents ( succRow1 , i ) , val2 = table . cellContents ( succRow2 , i ) ; if ( ! Objects . equals ( val1 , val2 ) ) { I sym = alphabet . getSymbol ( inputIdx ) ; Word < I > suffix = table . getSuffixes ( ) . get ( i ) ; return suffix . prepend ( sym ) ; } } throw new IllegalArgumentException ( "Bogus inconsistency" ) ; }
|
Analyzes an inconsistency. This analysis consists of determining the column in which the two successor rows differ.
|
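As a worked example of the analysis above, consider two rows u1 and u2 that look identical but whose a-successors differ in some column i; the new distinguishing suffix is then the i-th suffix with a prepended. A toy illustration with plain boolean arrays standing in for the successor rows (purely illustrative values):

```java
// Toy illustration of the inconsistency analysis: arrays stand in for
// the cell contents of the successor rows u1·a and u2·a.
public class InconsistencyDemo {
    public static void main(String[] args) {
        String[] suffixes = {"v0", "v1"};
        boolean[] rowU1a = {true, false}; // cells of row u1·a
        boolean[] rowU2a = {true, true};  // cells of row u2·a
        for (int i = 0; i < suffixes.length; i++) {
            if (rowU1a[i] != rowU2a[i]) {
                // 'a' distinguishes u1 and u2 when followed by suffixes[i]
                System.out.println("new suffix: a·" + suffixes[i]); // a·v1
                break;
            }
        }
    }
}
```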
1,207
|
public static < N extends AbstractDTNode < ? , ? , ? , N > > Iterator < N > nodeIterator ( N root ) { return new NodeIterator < > ( root ) ; }
|
Iterator that traverses all nodes of the subtree rooted at a given discrimination tree node.
|
1,208
|
public DefaultQuery < I , D > disprove ( A hypothesis , Collection < ? extends I > inputs ) throws ModelCheckingException { final DefaultQuery < I , D > result = propertyOracle . disprove ( hypothesis , inputs ) ; if ( result != null ) { LOGGER . logEvent ( "Property violated: '" + toString ( ) + "'" ) ; LOGGER . logQuery ( "Counter example for property: " + getCounterExample ( ) ) ; } return result ; }
|
Try to disprove this propertyOracle and log whenever it is disproved.
|
1,209
|
public DefaultQuery < I , D > doFindCounterExample ( A hypothesis , Collection < ? extends I > inputs ) throws ModelCheckingException { final DefaultQuery < I , D > result = propertyOracle . findCounterExample ( hypothesis , inputs ) ; if ( result != null ) { LOGGER . logEvent ( "Spurious counterexample found for property: '" + toString ( ) + "'" ) ; LOGGER . logCounterexample ( "Spurious counterexample: " + result ) ; } return result ; }
|
Try to find a counterexample to the given hypothesis and log whenever such a spurious counterexample is found.
|
1,210
|
protected RedBlueMerge < SP , TP , BlueFringePTAState < SP , TP > > tryMerge ( BlueFringePTA < SP , TP > pta , BlueFringePTAState < SP , TP > qr , BlueFringePTAState < SP , TP > qb ) { return pta . tryMerge ( qr , qb ) ; }
|
Attempts to merge a blue state into a red state.
|
1,211
|
public SampleSetEQOracle < I , D > add ( Word < I > input , D expectedOutput ) { testQueries . add ( new DefaultQuery < > ( input , expectedOutput ) ) ; return this ; }
|
Adds a query word along with its expected output to the sample set.
|
1,212
|
public final SampleSetEQOracle < I , D > addAll ( MembershipOracle < I , D > oracle , Word < I > ... words ) { return addAll ( oracle , Arrays . asList ( words ) ) ; }
|
Adds several query words to the sample set. The expected output is determined by means of the specified membership oracle.
|
1,213
|
public SampleSetEQOracle < I , D > addAll ( MembershipOracle < I , D > oracle , Collection < ? extends Word < I > > words ) { if ( words . isEmpty ( ) ) { return this ; } List < DefaultQuery < I , D > > newQueries = new ArrayList < > ( words . size ( ) ) ; for ( Word < I > w : words ) { newQueries . add ( new DefaultQuery < > ( w ) ) ; } oracle . processQueries ( newQueries ) ; testQueries . addAll ( newQueries ) ; return this ; }
|
Adds words to the sample set. The expected output is determined by means of the specified membership oracle.
|
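To make the sample-set idea concrete, here is a minimal standalone analogue (illustrative names, not the actual SampleSetEQOracle API): stored words with expected outputs act as a fixed pool of test queries, and the first mismatch against the hypothesis is reported as a counterexample.

```java
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Optional;
import java.util.function.Function;

// Minimal sketch of an equivalence check backed by a fixed sample set.
public class SampleSetSketch {
    static final Map<String, Boolean> SAMPLES = new LinkedHashMap<>();

    static Optional<String> findCounterExample(Function<String, Boolean> hypothesis) {
        for (Map.Entry<String, Boolean> e : SAMPLES.entrySet()) {
            if (!hypothesis.apply(e.getKey()).equals(e.getValue())) {
                return Optional.of(e.getKey()); // hypothesis disagrees here
            }
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        SAMPLES.put("ab", true);  // add(word, expectedOutput)
        SAMPLES.put("ba", false);
        // A hypothesis accepting everything is refuted by "ba":
        System.out.println(findCounterExample(w -> true)); // Optional[ba]
    }
}
```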
1,214
|
protected static < I , D > void fetchResults ( Iterator < DefaultQuery < I , D > > queryIt , List < D > output , int numSuffixes ) { for ( int j = 0 ; j < numSuffixes ; j ++ ) { DefaultQuery < I , D > qry = queryIt . next ( ) ; output . add ( qry . getOutput ( ) ) ; } }
|
Fetches the given number of query responses and adds them to the specified output list; the query iterator is advanced accordingly.
|
1,215
|
private QueryResult < S , O > filterAndProcessQuery ( Word < I > query , Word < O > partialOutput , Function < Word < I > , QueryResult < S , O > > processQuery ) { final LinkedList < I > filteredQueryList = new LinkedList < > ( query . asList ( ) ) ; final Iterator < I > queryIterator = filteredQueryList . iterator ( ) ; for ( final O outputSymbol : partialOutput ) { queryIterator . next ( ) ; if ( outputSymbol != null ) { queryIterator . remove ( ) ; } } final QueryResult < S , O > res = processQuery . apply ( Word . fromList ( filteredQueryList ) ) ; final WordBuilder < O > wordBuilder = new WordBuilder < > ( ) ; final Iterator < O > resultIterator = res . output . iterator ( ) ; for ( final O output : partialOutput ) { if ( output == null ) { wordBuilder . add ( resultIterator . next ( ) ) ; } else { wordBuilder . add ( output ) ; } } return new QueryResult < > ( wordBuilder . toWord ( ) , res . newState ) ; }
|
Filters out all query elements corresponding to reflexive edges in the reuse tree, executes the shorter query, and fills the filtered outputs back into the resulting output word.
|
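Concretely, null entries of the partial output mark positions that still have to be executed; all other positions are filtered out of the query, and their known outputs are merged back afterwards. A standalone illustration with plain java.util types (illustrative values):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

// Filter the already-answered positions out of a query, then merge the
// known outputs with the freshly computed ones, preserving order.
public class PartialOutputMerge {
    public static void main(String[] args) {
        List<Character> query = new ArrayList<>(List.of('a', 'b', 'c', 'd'));
        List<Character> partial = Arrays.asList(null, '1', null, '0'); // b, d known
        Iterator<Character> q = query.iterator();
        for (Character known : partial) {  // drop symbols whose output is known
            q.next();
            if (known != null) q.remove();
        }
        List<Character> answers = List.of('x', 'y'); // pretend result for [a, c]
        Iterator<Character> a = answers.iterator();
        StringBuilder merged = new StringBuilder();
        for (Character known : partial) {
            merged.append(known != null ? known : a.next());
        }
        System.out.println(query + " -> " + merged); // [a, c] -> x1y0
    }
}
```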
1,216
|
public static < S , I , D > int findLinear ( Query < I , D > ceQuery , AccessSequenceTransformer < I > asTransformer , SuffixOutput < I , D > hypOutput , MembershipOracle < I , D > oracle ) { return AcexLocalSuffixFinder . findSuffixIndex ( AcexAnalyzers . LINEAR_FWD , true , ceQuery , asTransformer , hypOutput , oracle ) ; }
|
Searches for a distinguishing suffix by checking for counterexample-yielding access sequence transformations in linear ascending order.
|
1,217
|
public static < I , D > int findLinearReverse ( Query < I , D > ceQuery , AccessSequenceTransformer < I > asTransformer , SuffixOutput < I , D > hypOutput , MembershipOracle < I , D > oracle ) { return AcexLocalSuffixFinder . findSuffixIndex ( AcexAnalyzers . LINEAR_BWD , true , ceQuery , asTransformer , hypOutput , oracle ) ; }
|
Searches for a distinguishing suffix by checking for counterexample-yielding access sequence transformations in linear descending order.
|
1,218
|
public static < I , D > int findRivestSchapire ( Query < I , D > ceQuery , AccessSequenceTransformer < I > asTransformer , SuffixOutput < I , D > hypOutput , MembershipOracle < I , D > oracle ) { return AcexLocalSuffixFinder . findSuffixIndex ( AcexAnalyzers . BINARY_SEARCH_BWD , true , ceQuery , asTransformer , hypOutput , oracle ) ; }
|
Searches for a distinguishing suffix by checking for counterexample-yielding access sequence transformations, using a binary search as proposed by Rivest & Schapire.
|
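The reduction works because the checked effect flips somewhere between index 0 (agreeing with the target) and the full counterexample (disagreeing), so a breakpoint must exist and a binary search finds it with logarithmically many membership queries. A generic sketch of that breakpoint search, with an IntPredicate standing in for the membership-query check (not LearnLib's API):

```java
import java.util.function.IntPredicate;

// Generic breakpoint search: given eff(low) == true and eff(high) == false,
// find an index i with eff(i) != eff(i + 1) in O(log n) evaluations.
public class BreakpointSearch {
    static int findBreakpoint(IntPredicate eff, int low, int high) {
        while (high - low > 1) {
            int mid = low + (high - low) / 2;
            if (eff.test(mid)) low = mid; else high = mid;
        }
        return low; // eff(low) != eff(low + 1)
    }

    public static void main(String[] args) {
        IntPredicate eff = i -> i < 6; // toy effect that flips at index 6
        System.out.println(findBreakpoint(eff, 0, 10)); // 5
    }
}
```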
1,219
|
private void closeTransition ( final ADTTransition < I , O > transition ) { if ( ! transition . needsSifting ( ) ) { return ; } final Word < I > accessSequence = transition . getSource ( ) . getAccessSequence ( ) ; final I symbol = transition . getInput ( ) ; this . oracle . reset ( ) ; for ( final I i : accessSequence ) { this . oracle . query ( i ) ; } transition . setOutput ( this . oracle . query ( symbol ) ) ; final Word < I > longPrefix = accessSequence . append ( symbol ) ; final ADTNode < ADTState < I , O > , I , O > finalNode = this . adt . sift ( this . oracle , longPrefix , transition . getSiftNode ( ) ) ; assert ADTUtil . isLeafNode ( finalNode ) ; final ADTState < I , O > targetState ; if ( finalNode . getHypothesisState ( ) == null ) { targetState = this . hypothesis . addState ( ) ; targetState . setAccessSequence ( longPrefix ) ; finalNode . setHypothesisState ( targetState ) ; transition . setIsSpanningTreeEdge ( true ) ; this . observationTree . addState ( targetState , longPrefix , transition . getOutput ( ) ) ; for ( final I i : this . alphabet ) { this . openTransitions . add ( this . hypothesis . createOpenTransition ( targetState , i , this . adt . getRoot ( ) ) ) ; } } else { targetState = finalNode . getHypothesisState ( ) ; } transition . setTarget ( targetState ) ; }
|
Close the given transition by means of sifting the associated long prefix through the ADT.
|
1,220
|
private void ensureConsistency ( final ADTNode < ADTState < I , O > , I , O > leaf ) { final ADTState < I , O > state = leaf . getHypothesisState ( ) ; final Word < I > as = state . getAccessSequence ( ) ; final Word < O > asOut = this . hypothesis . computeOutput ( as ) ; ADTNode < ADTState < I , O > , I , O > iter = leaf ; while ( iter != null ) { final Pair < Word < I > , Word < O > > trace = ADTUtil . buildTraceForNode ( iter ) ; final Word < I > input = trace . getFirst ( ) ; final Word < O > output = trace . getSecond ( ) ; final Word < O > hypOut = this . hypothesis . computeStateOutput ( state , input ) ; if ( ! hypOut . equals ( output ) ) { this . openCounterExamples . add ( new DefaultQuery < > ( as . concat ( input ) , asOut . concat ( output ) ) ) ; } iter = ADTUtil . getStartOfADS ( iter ) . getParent ( ) ; } }
|
Ensure that the output behavior of a hypothesis state matches the observed output behavior recorded in the ADT. Any differences in output behavior yield new counterexamples.
|
1,221
|
private boolean validateADS ( final ADTNode < ADTState < I , O > , I , O > oldADS , final ADTNode < ADTState < I , O > , I , O > newADS , final Set < ADTState < I , O > > cutout ) { final Set < ADTNode < ADTState < I , O > , I , O > > oldNodes ; if ( ADTUtil . isResetNode ( oldADS ) ) { oldNodes = ADTUtil . collectResetNodes ( this . adt . getRoot ( ) ) ; } else { oldNodes = ADTUtil . collectADSNodes ( this . adt . getRoot ( ) ) ; } if ( ! oldNodes . contains ( oldADS ) ) { throw new IllegalArgumentException ( "Subtree to replace does not exist" ) ; } final Set < ADTNode < ADTState < I , O > , I , O > > oldFinalNodes = ADTUtil . collectLeaves ( oldADS ) ; final Set < ADTNode < ADTState < I , O > , I , O > > newFinalNodes = ADTUtil . collectLeaves ( newADS ) ; final Set < ADTState < I , O > > oldFinalStates = oldFinalNodes . stream ( ) . map ( ADTNode :: getHypothesisState ) . collect ( Collectors . toSet ( ) ) ; final Set < ADTState < I , O > > newFinalStates = newFinalNodes . stream ( ) . map ( ADTNode :: getHypothesisState ) . collect ( Collectors . toSet ( ) ) ; newFinalStates . addAll ( cutout ) ; if ( ! oldFinalStates . equals ( newFinalStates ) ) { throw new IllegalArgumentException ( "New ADS does not cover all old nodes" ) ; } final Word < I > parentInputTrace = ADTUtil . buildTraceForNode ( oldADS ) . getFirst ( ) ; final Map < ADTState < I , O > , Pair < Word < I > , Word < O > > > traces = newFinalNodes . stream ( ) . collect ( Collectors . toMap ( ADTNode :: getHypothesisState , ADTUtil :: buildTraceForNode ) ) ; for ( final Map . Entry < ADTState < I , O > , Pair < Word < I > , Word < O > > > entry : traces . entrySet ( ) ) { final Word < I > accessSequence = entry . getKey ( ) . getAccessSequence ( ) ; final Word < I > prefix = accessSequence . concat ( parentInputTrace ) ; final Word < I > input = entry . getValue ( ) . getFirst ( ) ; final Word < O > output = entry . getValue ( ) . getSecond ( ) ; if ( ! this . hypothesis . computeSuffixOutput ( prefix , input ) . equals ( output ) ) { throw new IllegalArgumentException ( "Output of new ADS does not match hypothesis" ) ; } } return true ; }
|
Validate the well-definedness of an ADT replacement, i.e., that both ADTs cover the same set of hypothesis states and that the output behavior described in the replacement matches the hypothesis output.
|
1,222
|
public void initialize ( final Collection < S > states , final Function < S , Word < I > > asFunction , final Function < Word < I > , Word < O > > outputFunction ) { final FastMealyState < O > init = this . observationTree . addInitialState ( ) ; for ( final S s : states ) { final Word < I > as = asFunction . apply ( s ) ; final FastMealyState < O > treeNode = this . addTrace ( init , as , outputFunction . apply ( as ) ) ; this . nodeToObservationMap . put ( s , treeNode ) ; } }
|
Extended initialization method that allows the observation tree to be initialized with several hypothesis states.
|
1,223
|
public void addState ( final S newState , final Word < I > accessSequence , final O output ) { final Word < I > prefix = accessSequence . prefix ( accessSequence . length ( ) - 1 ) ; final I sym = accessSequence . lastSymbol ( ) ; final FastMealyState < O > pred = this . observationTree . getSuccessor ( this . observationTree . getInitialState ( ) , prefix ) ; final FastMealyState < O > target ; if ( pred . getTransitionObject ( alphabet . getSymbolIndex ( sym ) ) == null ) { target = this . observationTree . addState ( ) ; this . observationTree . addTransition ( pred , sym , target , output ) ; } else { target = this . observationTree . getSuccessor ( pred , sym ) ; } this . nodeToObservationMap . put ( newState , target ) ; }
|
Registers a new hypothesis state at the observation tree. States are expected to be registered in the order of their discovery, meaning that whenever a new state is added, information about all prefixes of its access sequence is already stored. Therefore, providing only the output of the last symbol of its access sequence is sufficient.
|
1,224
|
public Optional < Word < I > > findSeparatingWord ( final S s1 , final S s2 , final Word < I > prefix ) { final FastMealyState < O > n1 = this . nodeToObservationMap . get ( s1 ) ; final FastMealyState < O > n2 = this . nodeToObservationMap . get ( s2 ) ; final FastMealyState < O > s1Succ = this . observationTree . getSuccessor ( n1 , prefix ) ; final FastMealyState < O > s2Succ = this . observationTree . getSuccessor ( n2 , prefix ) ; if ( s1Succ != null && s2Succ != null ) { final Word < I > sepWord = NearLinearEquivalenceTest . findSeparatingWord ( this . observationTree , s1Succ , s2Succ , alphabet , true ) ; if ( sepWord != null ) { return Optional . of ( sepWord ) ; } } return Optional . empty ( ) ; }
|
Find a separating word for two hypothesis states after first applying the given input sequence.
|
1,225
|
public Word < I > findSeparatingWord ( final S s1 , final S s2 ) { final FastMealyState < O > n1 = this . nodeToObservationMap . get ( s1 ) ; final FastMealyState < O > n2 = this . nodeToObservationMap . get ( s2 ) ; return NearLinearEquivalenceTest . findSeparatingWord ( this . observationTree , n1 , n2 , this . alphabet , true ) ; }
|
Find a separating word for two hypothesis states.
|
1,226
|
protected static < I , D > void link ( AbstractBaseDTNode < I , D > dtNode , TTTState < I , D > state ) { assert dtNode . isLeaf ( ) ; dtNode . setData ( state ) ; state . dtLeaf = dtNode ; }
|
Establish the connection between a node in the discrimination tree and a state of the hypothesis.
|
1,227
|
protected void initializeState ( TTTState < I , D > state ) { for ( int i = 0 ; i < alphabet . size ( ) ; i ++ ) { I sym = alphabet . getSymbol ( i ) ; TTTTransition < I , D > trans = createTransition ( state , sym ) ; trans . setNonTreeTarget ( dtree . getRoot ( ) ) ; state . setTransition ( i , trans ) ; openTransitions . insertIncoming ( trans ) ; } }
|
Initializes a state. Creates its outgoing transition objects and adds them to the open list.
|
1,228
|
private void splitState ( TTTTransition < I , D > transition , Word < I > tempDiscriminator , D oldOut , D newOut ) { assert ! transition . isTree ( ) ; notifyPreSplit ( transition , tempDiscriminator ) ; AbstractBaseDTNode < I , D > dtNode = transition . getNonTreeTarget ( ) ; assert dtNode . isLeaf ( ) ; TTTState < I , D > oldState = dtNode . getData ( ) ; assert oldState != null ; TTTState < I , D > newState = makeTree ( transition ) ; AbstractBaseDTNode < I , D > . SplitResult children = split ( dtNode , tempDiscriminator , oldOut , newOut ) ; dtNode . setTemp ( true ) ; link ( children . nodeOld , oldState ) ; link ( children . nodeNew , newState ) ; if ( dtNode . getParent ( ) == null || ! dtNode . getParent ( ) . isTemp ( ) ) { blockList . insertBlock ( dtNode ) ; } notifyPostSplit ( transition , tempDiscriminator ) ; }
|
Splits a state in the hypothesis using a temporary discriminator. The state to be split is identified by an incoming non-tree transition. This transition is subsequently turned into a spanning tree transition.
|
1,229
|
protected boolean finalizeAny ( ) { GlobalSplitter < I , D > splitter = findSplitterGlobal ( ) ; if ( splitter != null ) { finalizeDiscriminator ( splitter . blockRoot , splitter . localSplitter ) ; return true ; } return false ; }
|
Chooses a block root and finalizes the corresponding discriminator.
|
1,230
|
protected TTTState < I , D > getAnyTarget ( TTTTransition < I , D > trans ) { if ( trans . isTree ( ) ) { return trans . getTreeTarget ( ) ; } return trans . getNonTreeTarget ( ) . anySubtreeState ( ) ; }
|
Retrieves the target state of a given transition. This method works for both tree and non-tree transitions. If a non-tree transition points to a non-leaf node, it is updated accordingly before a result is obtained.
|
1,231
|
private TTTState < I , D > getAnyState ( Iterable < ? extends I > suffix ) { return getAnySuccessor ( hypothesis . getInitialState ( ) , suffix ) ; }
|
Retrieves the state reached by the given sequence of symbols, starting from the initial state.
|
1,232
|
protected D query ( Word < I > prefix , Word < I > suffix ) { return oracle . answerQuery ( prefix , suffix ) ; }
|
Performs a membership query.
|
1,233
|
protected D query ( AccessSequenceProvider < I > accessSeqProvider , Word < I > suffix ) { return query ( accessSeqProvider . getAccessSequence ( ) , suffix ) ; }
|
Performs a membership query using an access sequence as its prefix.
|
1,234
|
public static < E > int linearSearchFwd ( AbstractCounterexample < E > acex , int low , int high ) { assert ! acex . testEffects ( low , high ) ; E effPrev = acex . effect ( low ) ; for ( int i = low + 1 ; i <= high ; i ++ ) { E eff = acex . effect ( i ) ; if ( ! acex . checkEffects ( effPrev , eff ) ) { return i - 1 ; } effPrev = eff ; } throw new IllegalArgumentException ( ) ; }
|
Scan linearly through the counterexample in ascending order.
|
1,235
|
public static < E > int exponentialSearchBwd ( AbstractCounterexample < E > acex , int low , int high ) { assert ! acex . testEffects ( low , high ) ; int ofs = 1 ; E effHigh = acex . effect ( high ) ; int highIter = high ; int lowIter = low ; while ( highIter - ofs > lowIter ) { int next = highIter - ofs ; E eff = acex . effect ( next ) ; if ( ! acex . checkEffects ( eff , effHigh ) ) { lowIter = next ; break ; } highIter = next ; ofs *= 2 ; } return binarySearchRight ( acex , lowIter , highIter ) ; }
|
Search for a suffix index using an exponential search.
|
1,236
|
public E insert ( E element ) { E evicted = null ; if ( size ( ) >= capacity ) { if ( evictPolicy == EvictPolicy . REJECT_NEW ) { return element ; } evicted = evict ( ) ; } deque . offerLast ( element ) ; return evicted ; }
|
Inserts an element into the deque and returns the one that had to be evicted in case of a capacity violation.
|
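A minimal runnable sketch of such a capacity-bounded insert, assuming an evict-oldest policy (the excerpt's evict() and EvictPolicy details are not shown, so this is a guess at one sensible behavior):

```java
import java.util.ArrayDeque;
import java.util.Deque;

// Bounded deque: inserting beyond capacity evicts (and returns) the
// oldest element; otherwise null is returned.
public class BoundedDeque<E> {
    private final Deque<E> deque = new ArrayDeque<>();
    private final int capacity;

    BoundedDeque(int capacity) { this.capacity = capacity; }

    public E insert(E element) {
        E evicted = null;
        if (deque.size() >= capacity) {
            evicted = deque.pollFirst(); // evict the oldest entry
        }
        deque.offerLast(element);
        return evicted;
    }

    public static void main(String[] args) {
        BoundedDeque<String> d = new BoundedDeque<>(2);
        d.insert("a");
        d.insert("b");
        System.out.println(d.insert("c")); // a (evicted)
    }
}
```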
1,237
|
public ADTNode < S , I , O > sift ( final SymbolQueryOracle < I , O > oracle , final Word < I > word , final ADTNode < S , I , O > subtree ) { ADTNode < S , I , O > current = subtree ; while ( ! ADTUtil . isLeafNode ( current ) ) { current = current . sift ( oracle , word ) ; } return current ; }
|
Successively sifts a word through the ADT induced by the given node. Stops when reaching a leaf.
|
1,238
|
public ADTNode < S , I , O > extendLeaf ( final ADTNode < S , I , O > nodeToSplit , final Word < I > distinguishingSuffix , final Word < O > oldOutput , final Word < O > newOutput ) { if ( ! ADTUtil . isLeafNode ( nodeToSplit ) ) { throw new IllegalArgumentException ( "Node to split is not a leaf node" ) ; } if ( ! ( distinguishingSuffix . length ( ) == oldOutput . length ( ) && oldOutput . length ( ) == newOutput . length ( ) ) ) { throw new IllegalArgumentException ( "Distinguishing suffixes and outputs differ in length" ) ; } if ( oldOutput . equals ( newOutput ) ) { throw new IllegalArgumentException ( "Old and new output are equal" ) ; } if ( this . root . equals ( nodeToSplit ) ) { return splitLeaf ( nodeToSplit , distinguishingSuffix , oldOutput , newOutput ) ; } return LeafSplitters . splitParent ( nodeToSplit , distinguishingSuffix , oldOutput , newOutput ) ; }
|
Splits a leaf node by extending the trace leading into the node to split.
|
1,239
|
public LCAInfo < S , I , O > findLCA ( final ADTNode < S , I , O > s1 , final ADTNode < S , I , O > s2 ) { final Map < ADTNode < S , I , O > , ADTNode < S , I , O > > s1ParentsToS1 = new HashMap < > ( ) ; ADTNode < S , I , O > s1Iter = s1 ; ADTNode < S , I , O > s2Iter = s2 ; while ( s1Iter . getParent ( ) != null ) { s1ParentsToS1 . put ( s1Iter . getParent ( ) , s1Iter ) ; s1Iter = s1Iter . getParent ( ) ; } final Set < ADTNode < S , I , O > > s1Parents = s1ParentsToS1 . keySet ( ) ; while ( s2Iter . getParent ( ) != null ) { if ( s1Parents . contains ( s2Iter . getParent ( ) ) ) { if ( ! ADTUtil . isSymbolNode ( s2Iter . getParent ( ) ) ) { throw new IllegalStateException ( "Only Symbol Nodes should be LCAs" ) ; } final ADTNode < S , I , O > lca = s2Iter . getParent ( ) ; final O s1Out = ADTUtil . getOutputForSuccessor ( lca , s1ParentsToS1 . get ( lca ) ) ; final O s2Out = ADTUtil . getOutputForSuccessor ( lca , s2Iter ) ; return new LCAInfo < > ( lca , s1Out , s2Out ) ; } s2Iter = s2Iter . getParent ( ) ; } throw new IllegalStateException ( "Nodes do not share a parent node" ) ; }
|
Return the lowest common ancestor for the two given nodes.
|
1,240
|
public static String getResults ( ) { StringBuilder sb = new StringBuilder ( ) ; for ( Entry < String , Counter > e : CUMULATED . entrySet ( ) ) { sb . append ( e . getValue ( ) . getSummary ( ) ) . append ( ", (" ) . append ( e . getValue ( ) . getCount ( ) / MILLISECONDS_PER_SECOND ) . append ( " s)" ) . append ( System . lineSeparator ( ) ) ; } return sb . toString ( ) ; }
|
Get profiling results as a string.
|
1,241
|
public static void logResults ( ) { for ( Entry < String , Counter > e : CUMULATED . entrySet ( ) ) { LOGGER . logProfilingInfo ( e . getValue ( ) ) ; } }
|
Log results in category PROFILING.
|
1,242
|
public static < I , O > MealyCacheOracle < I , O > createDAGCache ( Alphabet < I > alphabet , MembershipOracle < I , Word < O > > mqOracle ) { return MealyCacheOracle . createDAGCacheOracle ( alphabet , mqOracle ) ; }
|
Creates a cache oracle for a Mealy machine learning setup, using a DAG for internal cache organization.
|
1,243
|
public static < I , O > MealyCacheOracle < I , O > createTreeCache ( Alphabet < I > alphabet , MembershipOracle < I , Word < O > > mqOracle ) { return MealyCacheOracle . createTreeCacheOracle ( alphabet , mqOracle ) ; }
|
Creates a cache oracle for a Mealy machine learning setup, using a tree for internal cache organization.
|
1,244
|
public static < I , O > MealyCacheOracle < I , OutputAndLocalInputs < I , O > > createStateLocalInputTreeCache ( Collection < I > initialLocalInputs , MembershipOracle < I , Word < OutputAndLocalInputs < I , O > > > mqOracle ) { return MealyCacheOracle . createStateLocalInputTreeCacheOracle ( initialLocalInputs , mqOracle ) ; }
|
Creates a cache oracle for a Mealy machine learning setup with observable state-local inputs for every state of the system under learning.
|
1,245
|
public DFALearner < I > asDFALearner ( ) { return new DFALearner < I > ( ) { public String toString ( ) { return NLStarLearner . this . toString ( ) ; } public void startLearning ( ) { NLStarLearner . this . startLearning ( ) ; } public boolean refineHypothesis ( DefaultQuery < I , Boolean > ceQuery ) { return NLStarLearner . this . refineHypothesis ( ceQuery ) ; } public CompactDFA < I > getHypothesisModel ( ) { return NLStarLearner . this . getDeterminizedHypothesis ( ) ; } } ; }
|
Retrieves a view of this learner as a DFA learner. The DFA is obtained by determinizing and minimizing the NFA hypothesis.
|
1,246
|
public void insertBlock ( AbstractBaseDTNode < I , D > blockRoot ) { blockRoot . removeFromBlockList ( ) ; blockRoot . setNextElement ( next ) ; if ( getNextElement ( ) != null ) { next . setPrevElement ( blockRoot ) ; } blockRoot . setPrevElement ( this ) ; next = blockRoot ; }
|
Inserts a block into the list. Currently, the block is inserted at the head of the list; however, callers should not rely on this.
|
1,247
|
@ Autowired ( required = false ) public void setSamlLogger ( SAMLLogger samlLogger ) { Assert . notNull ( samlLogger , "SAMLLogger can't be null" ) ; this . samlLogger = samlLogger ; }
|
Logger for SAML events; cannot be null, must be set.
|
1,248
|
@ Autowired ( required = false ) @ Qualifier ( "webSSOprofileConsumer" ) public void setConsumer ( WebSSOProfileConsumer consumer ) { Assert . notNull ( consumer , "WebSSO Profile Consumer can't be null" ) ; this . consumer = consumer ; }
|
Profile for consumption of processed messages; must be set.
|
1,249
|
@ Autowired ( required = false ) @ Qualifier ( "hokWebSSOprofileConsumer" ) public void setHokConsumer ( WebSSOProfileConsumer hokConsumer ) { this . hokConsumer = hokConsumer ; }
|
Profile for consumption of processed messages using the Holder-of-Key profile; must be set.
|
1,250
|
public KeyStore loadKeystore ( String certResourceLocation , String privateKeyResourceLocation , String alias , String keyPassword ) { KeyStore keystore = createEmptyKeystore ( ) ; X509Certificate cert = loadCert ( certResourceLocation ) ; RSAPrivateKey privateKey = loadPrivateKey ( privateKeyResourceLocation ) ; addKeyToKeystore ( keystore , cert , privateKey , alias , keyPassword ) ; return keystore ; }
|
Based on a public certificate, private key, alias, and password, this method will load the certificate and private key as an entry into a newly created keystore, and it will set the provided alias and password on the keystore entry.
|
1,251
|
public void addKeyToKeystore ( KeyStore keyStore , X509Certificate cert , RSAPrivateKey privateKey , String alias , String password ) { KeyStore . PasswordProtection pass = new KeyStore . PasswordProtection ( password . toCharArray ( ) ) ; Certificate [ ] certificateChain = { cert } ; keyStore . setEntry ( alias , new KeyStore . PrivateKeyEntry ( privateKey , certificateChain ) , pass ) ; }
|
Based on a public certificate, private key, alias, and password, this method will load the certificate and private key as an entry into the keystore, and it will set the provided alias and password on the keystore entry.
|
1,252
|
public KeyStore createEmptyKeystore ( ) { KeyStore keyStore = KeyStore . getInstance ( "JKS" ) ; keyStore . load ( null , "" . toCharArray ( ) ) ; return keyStore ; }
|
Returns an empty KeyStore object.
|
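For reference, a runnable version of this creation; note that KeyStore.getInstance and KeyStore.load declare checked exceptions, which the excerpt elides:

```java
import java.security.KeyStore;

// Creating an empty JKS keystore: loading with a null stream
// initializes a fresh, empty store.
public class EmptyKeystoreDemo {
    public static void main(String[] args) throws Exception {
        KeyStore keyStore = KeyStore.getInstance("JKS");
        keyStore.load(null, "".toCharArray()); // null stream => empty store
        System.out.println(keyStore.size());   // 0
    }
}
```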
1,253
|
public X509Certificate loadCert ( String certLocation ) { CertificateFactory cf = CertificateFactory . getInstance ( "X509" ) ; Resource certRes = resourceLoader . getResource ( certLocation ) ; X509Certificate cert = ( X509Certificate ) cf . generateCertificate ( certRes . getInputStream ( ) ) ; return cert ; }
|
Given a resource location, it loads a PEM X509 certificate.
|
1,254
|
public RSAPrivateKey loadPrivateKey ( String privateKeyLocation ) { Resource keyRes = resourceLoader . getResource ( privateKeyLocation ) ; byte [ ] keyBytes = StreamUtils . copyToByteArray ( keyRes . getInputStream ( ) ) ; PKCS8EncodedKeySpec privateKeySpec = new PKCS8EncodedKeySpec ( keyBytes ) ; KeyFactory keyFactory = KeyFactory . getInstance ( "RSA" ) ; RSAPrivateKey privateKey = ( RSAPrivateKey ) keyFactory . generatePrivate ( privateKeySpec ) ; return privateKey ; }
|
Given a resource location, it loads a DER-encoded RSA private key.
|
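One detail worth spelling out: PKCS8EncodedKeySpec expects raw DER bytes, so this method only works if the resource already contains DER. If the key file is PEM-armored instead, the header and footer must be stripped and the body Base64-decoded first. A minimal sketch, assuming an unencrypted PKCS#8 key (hypothetical helper, not part of the class above):

```java
import java.security.KeyFactory;
import java.security.PrivateKey;
import java.security.spec.PKCS8EncodedKeySpec;
import java.util.Base64;

// Hypothetical helper: convert an unencrypted PKCS#8 PEM string into a
// PrivateKey by stripping the armor and Base64-decoding to DER.
public class PemKeySketch {
    static PrivateKey fromPem(String pem) throws Exception {
        String base64 = pem
                .replace("-----BEGIN PRIVATE KEY-----", "")
                .replace("-----END PRIVATE KEY-----", "")
                .replaceAll("\\s", "");
        byte[] der = Base64.getDecoder().decode(base64);
        return KeyFactory.getInstance("RSA")
                .generatePrivate(new PKCS8EncodedKeySpec(der));
    }
}
```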
1,255
|
public static Properties initialize ( URI uri , Configuration conf ) throws IOException , ConfigurationParseException { String host = Utils . getHost ( uri ) ; Properties props = new Properties ( ) ; if ( ! Utils . validSchema ( uri ) ) { props . setProperty ( SWIFT_AUTH_METHOD_PROPERTY , PUBLIC_ACCESS ) ; } else { final String container = Utils . getContainerName ( host ) ; final String service = Utils . getServiceName ( host ) ; final String [ ] prefix = new String [ ] { SWIFT_SERVICE_PREFIX + service } ; final String prefix2D = SWIFT2D_SERVICE_PREFIX + service ; props . setProperty ( SWIFT_CONTAINER_PROPERTY , container ) ; Utils . updateProperty ( conf , prefix2D , prefix , AUTH_URL , props , SWIFT_AUTH_PROPERTY , true ) ; Utils . updateProperty ( conf , prefix2D , prefix , USERNAME , props , SWIFT_USERNAME_PROPERTY , true ) ; Utils . updateProperty ( conf , prefix2D , prefix , PASSWORD , props , SWIFT_PASSWORD_PROPERTY , true ) ; Utils . updateProperty ( conf , prefix2D , prefix , BUFFER_DIR , props , BUFFER_DIR_PROPERTY , false ) ; Utils . updateProperty ( conf , prefix2D , prefix , NON_STREAMING_UPLOAD , props , NON_STREAMING_UPLOAD_PROPERTY , false ) ; Utils . updateProperty ( conf , prefix2D , prefix , AUTH_METHOD , props , SWIFT_AUTH_METHOD_PROPERTY , false ) ; Utils . updateProperty ( conf , prefix2D , prefix , BLOCK_SIZE , props , SWIFT_BLOCK_SIZE_PROPERTY , false ) ; Utils . updateProperty ( conf , prefix2D , prefix , FMODE_DELETE_TEMP_DATA , props , FMODE_AUTOMATIC_DELETE_PROPERTY , false ) ; Utils . updateProperty ( conf , prefix2D , prefix , PUBLIC , props , SWIFT_PUBLIC_PROPERTY , false ) ; String authMethod = props . getProperty ( SWIFT_AUTH_METHOD_PROPERTY , KEYSTONE_V3_AUTH ) ; props . setProperty ( SWIFT_AUTH_METHOD_PROPERTY , authMethod ) ; if ( authMethod . equals ( KEYSTONE_V3_AUTH ) ) { Utils . updateProperty ( conf , prefix2D , prefix , TENANT , props , SWIFT_TENANT_PROPERTY , false ) ; Utils . updateProperty ( conf , prefix2D , prefix , REGION , props , SWIFT_REGION_PROPERTY , true ) ; props . setProperty ( SWIFT_PROJECT_ID_PROPERTY , props . getProperty ( SWIFT_TENANT_PROPERTY ) ) ; props . setProperty ( SWIFT_USER_ID_PROPERTY , props . getProperty ( SWIFT_USERNAME_PROPERTY ) ) ; } else if ( authMethod . equals ( "basic" ) ) { Utils . updateProperty ( conf , prefix2D , prefix , REGION , props , SWIFT_REGION_PROPERTY , false ) ; Utils . updateProperty ( conf , prefix2D , prefix , TENANT , props , SWIFT_TENANT_PROPERTY , false ) ; } else { Utils . updateProperty ( conf , prefix2D , prefix , REGION , props , SWIFT_REGION_PROPERTY , false ) ; Utils . updateProperty ( conf , prefix2D , prefix , TENANT , props , SWIFT_TENANT_PROPERTY , true ) ; } } return props ; }
|
Parse configuration properties from the core-site.xml and initialize Swift configuration.
|
1,256
|
public SwiftCachedObject get ( final String objName ) throws IOException { LOG . trace ( "Get from cache: {}" , objName ) ; SwiftCachedObject res = cache . get ( objName ) ; if ( res == null ) { LOG . trace ( "Cache get: {} is not in the cache. Access Swift to get content length" , objName ) ; StoredObject rawObj = container . getObject ( removeTrailingSlash ( objName ) ) ; if ( rawObj != null && rawObj . exists ( ) ) { res = new SwiftCachedObject ( rawObj . getContentLength ( ) , Utils . lastModifiedAsLong ( rawObj . getLastModified ( ) ) ) ; put ( objName , res ) ; } else { return null ; } } return res ; }
|
The get function will first search for the object in the cache. If not found, it will issue a HEAD request for the object metadata and add the object to the cache.
|
1,257
|
public boolean isTemporaryPath ( String path ) { for ( String tempPath : tempIdentifiers ) { String [ ] tempPathComponents = tempPath . split ( "/" ) ; if ( tempPathComponents . length > 0 && path != null && path . contains ( tempPathComponents [ 0 ] . replace ( "ID" , "" ) ) ) { return true ; } } return false ; }
|
Inspect the path and return true if the path contains the reserved _temporary identifier, or the first entry from fs.stocator.temp.identifier if provided.
|
1,258
|
public Path modifyPathToFinalDestination ( Path path ) throws IOException { String res ; if ( tempFileOriginator . equals ( DEFAULT_FOUTPUTCOMMITTER_V1 ) ) { res = parseHadoopOutputCommitter ( path , true , hostNameScheme ) ; } else { res = extractNameFromTempPath ( path , true , hostNameScheme ) ; } return new Path ( hostNameScheme , res ) ; }
|
Accept a temporary path and return a final destination path.
|
1,259
|
private String parseHadoopOutputCommitter ( Path fullPath , boolean addTaskIdCompositeName , String hostNameScheme ) throws IOException { String path = fullPath . toString ( ) ; String noPrefix = path ; if ( path . startsWith ( hostNameScheme ) ) { noPrefix = path . substring ( hostNameScheme . length ( ) ) ; } int npIdx = noPrefix . indexOf ( HADOOP_TEMPORARY ) ; String objectName ; if ( npIdx >= 0 ) { if ( npIdx == 0 || npIdx == 1 && noPrefix . startsWith ( "/" ) ) { throw new IOException ( "Object name is missing" ) ; } else { objectName = noPrefix . substring ( 0 , npIdx - 1 ) ; if ( addTaskIdCompositeName ) { String objName = null ; String taskAttempt = Utils . extractTaskID ( path , HADOOP_ATTEMPT ) ; if ( taskAttempt != null ) { int fIndex = fullPath . toString ( ) . indexOf ( taskAttempt + "/" ) ; if ( fIndex > 0 ) { fIndex = fIndex + taskAttempt . length ( ) + 1 ; } if ( fIndex < fullPath . toString ( ) . length ( ) ) { objName = fullPath . toString ( ) . substring ( fIndex ) ; } } if ( objName == null ) { objName = fullPath . getName ( ) ; } if ( taskAttempt != null && ! objName . startsWith ( HADOOP_ATTEMPT ) ) { String extension = extractExtension ( objName ) ; objName = objName . replace ( "." + extension , "" ) + "-" + taskAttempt ; if ( ! extension . equals ( "" ) ) { objName += "." + extension ; } } objectName = objectName + "/" + objName ; } } return objectName ; } return noPrefix ; }
|
Main method to parse Hadoop OutputCommitter V1 or V2 paths, as used by Hadoop M-R or Apache Spark. The method transforms the object name from the temporary path.
|
1,260
|
private String extractExtension ( String filename ) { int startExtension = filename . indexOf ( '.' ) ; if ( startExtension > 0 ) { return filename . substring ( startExtension + 1 ) ; } return "" ; }
|
Extracts the extension of a filename; for example, one3-attempt-01.txt.gz will return txt.gz.
|
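A quick check of the first-dot rule above (everything after the first '.' counts as the extension, so multi-part extensions survive):

```java
// Mirrors the extraction logic above and demonstrates the example
// from the description.
public class ExtensionDemo {
    static String extractExtension(String filename) {
        int start = filename.indexOf('.');
        return start > 0 ? filename.substring(start + 1) : "";
    }

    public static void main(String[] args) {
        System.out.println(extractExtension("one3-attempt-01.txt.gz")); // txt.gz
        System.out.println(extractExtension("README"));                 // (empty)
    }
}
```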
1,261
|
protected ObjectMetadata getObjectMetadata ( String key ) { try { ObjectMetadata meta = mClient . getObjectMetadata ( mBucket , key ) ; return meta ; } catch ( AmazonClientException e ) { LOG . debug ( e . getMessage ( ) ) ; return null ; } }
|
Request object metadata. Used to check the _SUCCESS object or to identify whether objects were generated by Stocator.
|
1,262
|
public PutObjectRequest newPutObjectRequest ( String key , ObjectMetadata metadata , File srcfile ) { PutObjectRequest putObjectRequest = new PutObjectRequest ( mBucket , key , srcfile ) ; putObjectRequest . setMetadata ( metadata ) ; return putObjectRequest ; }
|
Create a putObject request and set the metadata.
|
1,263
|
private void initConnectionSettings ( Configuration conf , ClientConfiguration clientConf ) throws IOException { clientConf . setMaxConnections ( Utils . getInt ( conf , FS_COS , FS_ALT_KEYS , MAXIMUM_CONNECTIONS , DEFAULT_MAXIMUM_CONNECTIONS ) ) ; clientConf . setClientExecutionTimeout ( Utils . getInt ( conf , FS_COS , FS_ALT_KEYS , CLIENT_EXEC_TIMEOUT , DEFAULT_CLIENT_EXEC_TIMEOUT ) ) ; clientConf . setMaxErrorRetry ( Utils . getInt ( conf , FS_COS , FS_ALT_KEYS , MAX_ERROR_RETRIES , DEFAULT_MAX_ERROR_RETRIES ) ) ; clientConf . setConnectionTimeout ( Utils . getInt ( conf , FS_COS , FS_ALT_KEYS , ESTABLISH_TIMEOUT , DEFAULT_ESTABLISH_TIMEOUT ) ) ; clientConf . setSocketTimeout ( Utils . getInt ( conf , FS_COS , FS_ALT_KEYS , SOCKET_TIMEOUT , DEFAULT_SOCKET_TIMEOUT ) ) ; clientConf . setRequestTimeout ( Utils . getInt ( conf , FS_COS , FS_ALT_KEYS , REQUEST_TIMEOUT , DEFAULT_REQUEST_TIMEOUT ) ) ; int sockSendBuffer = Utils . getInt ( conf , FS_COS , FS_ALT_KEYS , SOCKET_SEND_BUFFER , DEFAULT_SOCKET_SEND_BUFFER ) ; int sockRecvBuffer = Utils . getInt ( conf , FS_COS , FS_ALT_KEYS , SOCKET_RECV_BUFFER , DEFAULT_SOCKET_RECV_BUFFER ) ; clientConf . setSocketBufferSizeHints ( sockSendBuffer , sockRecvBuffer ) ; String signerOverride = Utils . getTrimmed ( conf , FS_COS , FS_ALT_KEYS , SIGNING_ALGORITHM , "" ) ; if ( ! signerOverride . isEmpty ( ) ) { LOG . debug ( "Signer override = {}" , signerOverride ) ; clientConf . setSignerOverride ( signerOverride ) ; } String userAgentPrefix = Utils . getTrimmed ( conf , FS_COS , FS_ALT_KEYS , USER_AGENT_PREFIX , DEFAULT_USER_AGENT_PREFIX ) ; String userAgentName = singletoneInitTimeData . getUserAgentName ( ) ; if ( ! userAgentPrefix . equals ( DEFAULT_USER_AGENT_PREFIX ) ) { userAgentName = userAgentPrefix + " " + userAgentName ; } clientConf . setUserAgentPrefix ( userAgentName ) ; }
|
Initializes connection management.
|
1,264
|
private String correctPlusSign ( String origin , String stringToCorrect ) { if ( origin . contains ( "+" ) ) { LOG . debug ( "Adapt plus sign in {} to avoid SDK bug on {}" , origin , stringToCorrect ) ; StringBuilder tmpStringToCorrect = new StringBuilder ( stringToCorrect ) ; boolean hasSign = true ; int fromIndex = 0 ; while ( hasSign ) { int plusLocation = origin . indexOf ( "+" , fromIndex ) ; if ( plusLocation < 0 ) { hasSign = false ; break ; } if ( tmpStringToCorrect . charAt ( plusLocation ) == ' ' ) { tmpStringToCorrect . setCharAt ( plusLocation , '+' ) ; } if ( origin . length ( ) <= plusLocation + 1 ) { fromIndex = plusLocation + 1 ; } else { fromIndex = origin . length ( ) ; } } LOG . debug ( "Adapt plus sign {} corrected to {}" , stringToCorrect , tmpStringToCorrect . toString ( ) ) ; return tmpStringToCorrect . toString ( ) ; } return stringToCorrect ; }
|
Due to an SDK bug, list operations may return strings that have spaces instead of '+'. This method will try to fix names for known patterns.
|
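A compact standalone analogue of that restoration loop: wherever the original key has '+', the listed name may show ' ' at the same index, and the character is put back. (The index bookkeeping is simplified to a plain indexOf scan; the excerpt's fromIndex update appears to stop after the first '+' unless it is the last character, whereas this sketch advances past every match.)

```java
// Restore '+' characters that an object listing replaced with spaces,
// using the original key as the reference for their positions.
public class PlusSignDemo {
    static String restorePlus(String origin, String listed) {
        StringBuilder fixed = new StringBuilder(listed);
        for (int i = origin.indexOf('+'); i >= 0; i = origin.indexOf('+', i + 1)) {
            if (i < fixed.length() && fixed.charAt(i) == ' ') {
                fixed.setCharAt(i, '+');
            }
        }
        return fixed.toString();
    }

    public static void main(String[] args) {
        System.out.println(restorePlus("a+b+c.txt", "a b c.txt")); // a+b+c.txt
    }
}
```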
1,265
|
private void copyFile ( String srcKey , String dstKey , long size ) throws IOException , InterruptedIOException , AmazonClientException { LOG . debug ( "copyFile {} -> {} " , srcKey , dstKey ) ; CopyObjectRequest copyObjectRequest = new CopyObjectRequest ( mBucket , srcKey , mBucket , dstKey ) ; try { ObjectMetadata srcmd = getObjectMetadata ( srcKey ) ; if ( srcmd != null ) { copyObjectRequest . setNewObjectMetadata ( srcmd ) ; } ProgressListener progressListener = new ProgressListener ( ) { public void progressChanged ( ProgressEvent progressEvent ) { switch ( progressEvent . getEventType ( ) ) { case TRANSFER_PART_COMPLETED_EVENT : break ; default : break ; } } } ; Copy copy = transfers . copy ( copyObjectRequest ) ; copy . addProgressListener ( progressListener ) ; try { copy . waitForCopyResult ( ) ; } catch ( InterruptedException e ) { throw new InterruptedIOException ( "Interrupted copying " + srcKey + " to " + dstKey + ", cancelling" ) ; } } catch ( AmazonClientException e ) { throw translateException ( "copyFile(" + srcKey + ", " + dstKey + ")" , srcKey , e ) ; } }
|
Copy a single object in the bucket via a COPY operation.
|
1,266
|
static BlockFactory createFactory ( COSAPIClient owner , String name ) { switch ( name ) { case COSConstants . FAST_UPLOAD_BUFFER_ARRAY : return new ArrayBlockFactory ( owner ) ; case COSConstants . FAST_UPLOAD_BUFFER_DISK : return new DiskBlockFactory ( owner ) ; default : throw new IllegalArgumentException ( "Unsupported block buffer" + " \"" + name + '"' ) ; } }
|
Creates a block factory for the given buffer mechanism name.
|
1,267
|
private synchronized void reopen ( String msg , long targetPos , long length ) throws IOException { if ( wrappedStream != null ) { closeStream ( "reopen(" + msg + ")" , contentRangeFinish ) ; } contentRangeStart = targetPos ; contentRangeFinish = targetPos + Math . max ( readahead , length ) + threasholdRead ; if ( negativeSeek < 0 ) { contentRangeFinish = targetPos + Math . abs ( negativeSeek ) ; negativeSeek = 0 ; } try { LOG . trace ( "reopen({}) for {} range[{}-{}], length={}," + " streamPosition={}, nextReadPosition={}" , uri , msg , contentRangeStart , contentRangeFinish , length , pos , nextReadPos ) ; wrappedStream = SwiftAPIDirect . getObject ( new Path ( uri ) , mJossAccount , contentRangeStart , contentRangeFinish , scm ) ; if ( wrappedStream == null ) { throw new IOException ( "Null IO stream from reopen of (" + msg + ") " + uri ) ; } } catch ( ClientException e ) { LOG . error ( e . getMessage ( ) ) ; throw new IOException ( "Reopen at position " + targetPos + uri ) ; } pos = targetPos ; }
|
Reopen stream if closed.
|
1,268
|
private void closeStream ( String msg , long length ) { if ( wrappedStream != null ) { long remaining = remainingInCurrentRequest ( ) ; boolean shouldAbort = remaining > readahead ; if ( ! shouldAbort ) { try { wrappedStream . close ( ) ; } catch ( IOException e ) { LOG . debug ( "When closing {} stream for {}" , uri , msg , e ) ; shouldAbort = true ; } } if ( shouldAbort ) { wrappedStream . abort ( ) ; } LOG . trace ( "Close stream {} {}: {}; streamPos={}, nextReadPos={}," + " request range {}-{} length={}" , uri , ( shouldAbort ? "aborted" : "closed" ) , msg , pos , nextReadPos , contentRangeStart , contentRangeFinish , length ) ; wrappedStream = null ; } }
|
Close the stream.
|
1,269
|
public synchronized long remainingInFile ( ) throws IOException { return objectCache . get ( objName ) . getContentLength ( ) - pos ; }
|
Bytes left in stream.
|
1,270
|
private synchronized void reopen ( String reason , long targetPos , long length ) throws IOException { if ( wrappedStream != null ) { closeStream ( "reopen(" + reason + ")" , contentRangeFinish , false ) ; } contentRangeFinish = calculateRequestLimit ( inputPolicy , targetPos , length , contentLength , readahead ) ; LOG . debug ( "reopen({}) for {} range[{}-{}], length={}," + " streamPosition={}, nextReadPosition={}" , uri , reason , targetPos , contentRangeFinish , length , pos , nextReadPos ) ; try { GetObjectRequest request = new GetObjectRequest ( bucket , key ) . withRange ( targetPos , contentRangeFinish - 1 ) ; wrappedStream = client . getObject ( request ) . getObjectContent ( ) ; contentRangeStart = targetPos ; if ( wrappedStream == null ) { throw new IOException ( "Null IO stream from reopen of (" + reason + ") " + uri ) ; } } catch ( AmazonClientException e ) { throw COSUtils . translateException ( "Reopen at position " + targetPos , uri , e ) ; } pos = targetPos ; }
|
Opens up the stream at the specified target position and for the given length.
|
1,271
|
private void lazySeek ( long targetPos , long len ) throws IOException { seekInStream ( targetPos , len ) ; if ( wrappedStream == null ) { reopen ( "read from new offset" , targetPos , len ) ; } }
|
Perform lazy seek and adjust stream to correct position for reading.
|
1,272
|
static long calculateRequestLimit ( COSInputPolicy inputPolicy , long targetPos , long length , long contentLength , long readahead ) { long rangeLimit ; switch ( inputPolicy ) { case Random : rangeLimit = ( length < 0 ) ? contentLength : targetPos + Math . max ( readahead , length ) ; break ; case Sequential : rangeLimit = contentLength ; break ; case Normal : default : rangeLimit = contentLength ; } rangeLimit = Math . min ( contentLength , rangeLimit ) ; return rangeLimit ; }
|
Calculate the limit for a get request, based on input policy and state of object.
|
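A worked example of the policy rule (Random reads only what was asked for, padded by the readahead; Sequential and Normal read to end-of-object):

```java
// Same arithmetic as calculateRequestLimit above, restated standalone.
public class RangeLimitDemo {
    enum Policy { Random, Sequential }

    static long limit(Policy p, long targetPos, long length,
                      long contentLength, long readahead) {
        long rangeLimit = (p == Policy.Random)
                ? ((length < 0) ? contentLength
                                : targetPos + Math.max(readahead, length))
                : contentLength;
        return Math.min(contentLength, rangeLimit);
    }

    public static void main(String[] args) {
        // 1 GiB object, 4 KiB read at offset 0, 64 KiB readahead:
        System.out.println(limit(Policy.Random, 0, 4096, 1L << 30, 65536));     // 65536
        System.out.println(limit(Policy.Sequential, 0, 4096, 1L << 30, 65536)); // 1073741824
    }
}
```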
1,273
|
public static String getContainerName ( String hostname , boolean serviceRequired ) throws IOException { int i = hostname . lastIndexOf ( "." ) ; if ( i <= 0 ) { if ( serviceRequired ) { throw badHostName ( hostname ) ; } return hostname ; } return hostname . substring ( 0 , i ) ; }
|
Extracts the container name from a hostname of the form container.service or container.
|
1,274
|
public static String getServiceName ( String hostname ) throws IOException { int i = hostname . lastIndexOf ( "." ) ; if ( i <= 0 ) { throw badHostName ( hostname ) ; } String service = hostname . substring ( i + 1 ) ; if ( service . isEmpty ( ) || service . contains ( "." ) ) { throw badHostName ( hostname ) ; } return service ; }
|
Extracts the service name from a hostname of the form container.service.
|
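Both helpers split on the last dot, so dots inside the container part are preserved. For example:

```java
// container.service parsing: the last dot separates the two parts.
public class HostParseDemo {
    public static void main(String[] args) {
        String[] hosts = {"mydata.myservice", "logs.2020.myservice"};
        for (String h : hosts) {
            int i = h.lastIndexOf('.');
            System.out.println(h.substring(0, i) + " / " + h.substring(i + 1));
        }
        // prints: mydata / myservice
        //         logs.2020 / myservice
    }
}
```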
1,275
|
public static boolean validSchema ( URI uri ) throws IOException { LOG . trace ( "Checking schema {}" , uri . toString ( ) ) ; String hostName = Utils . getHost ( uri ) ; LOG . trace ( "Got hostname as {}" , hostName ) ; int i = hostName . lastIndexOf ( "." ) ; if ( i < 0 ) { return false ; } String service = hostName . substring ( i + 1 ) ; LOG . trace ( "Got service as {}" , service ) ; if ( service . isEmpty ( ) || service . contains ( "." ) ) { return false ; } return true ; }
|
Test if the hostName is of the form container.service.
|
1,276
|
public static String getHost ( URI uri ) throws IOException { String host = uri . getHost ( ) ; if ( host != null ) { return host ; } host = uri . toString ( ) ; int sInd = host . indexOf ( "//" ) + 2 ; host = host . substring ( sInd ) ; int eInd = host . indexOf ( "/" ) ; if ( eInd != - 1 ) { host = host . substring ( 0 , eInd ) ; } host = URLDecoder . decode ( host , StandardCharsets . UTF_8 . toString ( ) ) ; return host ; }
|
Extract host name from the URI.
|
1,277
|
public static String getOption ( Properties props , String key ) throws IOException { String val = props . getProperty ( key ) ; if ( val == null ) { throw new IOException ( "Undefined property: " + key ) ; } return val ; }
|
Get a mandatory configuration option.
|
1,278
|
public static void updateProperty ( Configuration conf , String prefix , String [ ] altPrefix , String key , Properties props , String propsKey , boolean required ) throws ConfigurationParseException { String val = conf . get ( prefix + key ) ; String altKey = prefix + key ; if ( val == null ) { for ( String alternativePrefix : altPrefix ) { altKey = alternativePrefix + key ; val = conf . get ( altKey ) ; } } if ( required && val == null ) { throw new ConfigurationParseException ( "Missing mandatory configuration: " + key ) ; } if ( val != null ) { LOG . trace ( "Found alternative key {} value {}" , altKey , val ) ; props . setProperty ( propsKey , val . trim ( ) ) ; } }
|
Read a key from core-site.xml and parse it into the connector configuration.
|
1,279
|
public static String extractTaskID ( String path , String identifier ) { LOG . debug ( "extract task id for {}" , path ) ; if ( path . contains ( HADOOP_ATTEMPT ) ) { String prf = path . substring ( path . indexOf ( HADOOP_ATTEMPT ) ) ; if ( prf . contains ( "/" ) ) { return TaskAttemptID . forName ( prf . substring ( 0 , prf . indexOf ( "/" ) ) ) . toString ( ) ; } return TaskAttemptID . forName ( prf ) . toString ( ) ; } else if ( identifier != null && path . contains ( identifier ) ) { int ind = path . indexOf ( identifier ) ; String prf = path . substring ( ind + identifier . length ( ) ) ; int boundary = prf . length ( ) ; if ( prf . indexOf ( "/" ) > 0 ) { boundary = prf . indexOf ( "/" ) ; } String taskID = prf . substring ( 0 , boundary ) ; LOG . debug ( "extracted task id {} for {}" , taskID , path ) ; return taskID ; } return null ; }
|
Extract Hadoop Task ID from path.
|
1,280
|
public static long lastModifiedAsLong ( String strTime ) throws IOException { final SimpleDateFormat simpleDateFormat = new SimpleDateFormat ( TIME_PATTERN , Locale . US ) ; try { long lastModified = simpleDateFormat . parse ( strTime ) . getTime ( ) ; if ( lastModified == 0 ) { lastModified = System . currentTimeMillis ( ) ; } return lastModified ; } catch ( ParseException e ) { throw new IOException ( "Failed to parse " + strTime , e ) ; } }
|
Transforms a last-modified time stamp from String to the long format.
|
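A parsing sketch for this conversion; the TIME_PATTERN constant is not shown in the excerpt, so an RFC 1123-style pattern is assumed here:

```java
import java.text.SimpleDateFormat;
import java.util.Locale;

// Parse an HTTP-style last-modified string into epoch milliseconds.
// The actual TIME_PATTERN may differ; this pattern is an assumption.
public class LastModifiedDemo {
    public static void main(String[] args) throws Exception {
        SimpleDateFormat fmt =
                new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
        long millis = fmt.parse("Fri, 11 Jun 2021 08:30:00 GMT").getTime();
        System.out.println(millis); // epoch milliseconds
    }
}
```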
1,281
|
public static IOException extractException ( String operation , String path , ExecutionException ee ) { IOException ioe ; Throwable cause = ee . getCause ( ) ; if ( cause instanceof AmazonClientException ) { ioe = translateException ( operation , path , ( AmazonClientException ) cause ) ; } else if ( cause instanceof IOException ) { ioe = ( IOException ) cause ; } else { ioe = new IOException ( operation + " failed: " + cause , cause ) ; } return ioe ; }
|
Extract an exception from a failed future and convert it to an IOE.
|
1,282
|
static boolean containsInterruptedException ( Throwable thrown ) { if ( thrown == null ) { return false ; } if ( thrown instanceof InterruptedException || thrown instanceof InterruptedIOException ) { return true ; } return containsInterruptedException ( thrown . getCause ( ) ) ; }
|
Recurse down the exception chain, looking for any inner details about an interrupted exception.
|
1,283
|
public static int ensureOutputParameterInRange ( String name , long size ) { if ( size > Integer . MAX_VALUE ) { LOG . warn ( "cos: {} capped to ~2.14GB" + " (maximum allowed size with current output mechanism)" , name ) ; return Integer . MAX_VALUE ; } else { return ( int ) size ; } }
|
Ensure that the long value is in the range of an integer.
|
1,284
|
public static COSFileStatus createFileStatus ( Path keyPath , S3ObjectSummary summary , long blockSize ) { long size = summary . getSize ( ) ; return createFileStatus ( keyPath , objectRepresentsDirectory ( summary . getKey ( ) , size ) , size , summary . getLastModified ( ) , blockSize ) ; }
|
Create a file status instance from a listing.
|
1,285
|
public static IStoreClient getStoreClient ( URI fsuri , Configuration conf ) throws IOException { final String fsSchema = fsuri . toString ( ) . substring ( 0 , fsuri . toString ( ) . indexOf ( "://" ) ) ; final ClassLoader classLoader = ObjectStoreVisitor . class . getClassLoader ( ) ; String [ ] supportedSchemas = conf . get ( "fs.stocator.scheme.list" , Constants . SWIFT ) . split ( "," ) ; for ( String scheme : supportedSchemas ) { final String supportedScheme = conf . get ( "fs.stocator." + scheme . trim ( ) + ".scheme" , Constants . SWIFT2D ) ; final String implementation = conf . get ( "fs.stocator." + scheme . trim ( ) + ".impl" , "com.ibm.stocator.fs.swift.SwiftAPIClient" ) ; LOG . debug ( "Stocator schema space : {}, provided {}. Implementation {}" , fsSchema , supportedScheme , implementation ) ; if ( fsSchema . equals ( supportedScheme ) ) { LOG . info ( "Stocator registered as {} for {}" , fsSchema , fsuri . toString ( ) ) ; IStoreClient storeClient ; try { LOG . debug ( "Load implementation class {}" , implementation ) ; if ( fsSchema . equals ( "cos" ) || fsSchema . equals ( "s3d" ) ) { LOG . debug ( "Load direct init for COSAPIClient. Overwrite {}" , implementation ) ; storeClient = new COSAPIClient ( fsuri , conf ) ; } else { final Class < ? > aClass = classLoader . loadClass ( implementation ) ; storeClient = ( IStoreClient ) aClass . getConstructor ( URI . class , Configuration . class ) . newInstance ( fsuri , conf ) ; } } catch ( InstantiationException | IllegalAccessException | InvocationTargetException | NoSuchMethodException | SecurityException | ClassNotFoundException e ) { LOG . error ( "Exception in load implementation class {}: {}" , implementation , e . getMessage ( ) ) ; throw new IOException ( "No object store for: " + fsSchema , e ) ; } try { storeClient . initiate ( supportedScheme ) ; return storeClient ; } catch ( IOException e ) { LOG . error ( e . getMessage ( ) ) ; throw e ; } } } throw new IOException ( "No object store for: " + fsSchema ) ; }
|
fs.stocator.scheme.list contains a comma-separated list of the provided back-end storage drivers. If none is provided, or the key is not present, then swift is the default value.
|
1,286
|
public void createAccount ( ) { mAccount = new AccountFactory ( mAccountConfig ) . setHttpClient ( httpclient ) . createAccount ( ) ; mAccess = mAccount . getAccess ( ) ; if ( mRegion != null ) { mAccess . setPreferredRegion ( mRegion ) ; } }
|
Creates account model.
|
1,287
|
public void createDummyAccount ( ) { mAccount = new DummyAccountFactory ( mAccountConfig ) . setHttpClient ( httpclient ) . createAccount ( ) ; mAccess = mAccount . getAccess ( ) ; }
|
Creates a virtual account. Used for public containers.
|
1,288
|
public void authenticate ( ) { if ( mAccount == null ) { createAccount ( ) ; } else { mAccess = mAccount . authenticate ( ) ; if ( mRegion != null ) { mAccess . setPreferredRegion ( mRegion ) ; } } }
|
Authenticates and renews the token.
|
1,289
|
public String getAccessURL ( ) { if ( mUsePublicURL ) { LOG . trace ( "Using public URL: " + mAccess . getPublicURL ( ) ) ; return mAccess . getPublicURL ( ) ; } LOG . trace ( "Using internal URL: " + mAccess . getInternalURL ( ) ) ; return mAccess . getInternalURL ( ) ; }
|
Get authenticated URL.
|
1,290
|
private synchronized COSDataBlocks . DataBlock createBlockIfNeeded ( ) throws IOException { if ( activeBlock == null ) { blockCount ++ ; if ( blockCount >= COSConstants . MAX_MULTIPART_COUNT ) { LOG . error ( "Number of partitions in stream exceeds limit for S3: " + COSConstants . MAX_MULTIPART_COUNT + " write may fail." ) ; } activeBlock = blockFactory . create ( key , blockCount , blockSize ) ; } return activeBlock ; }
|
Creates a destination block on demand.
|
1,291
|
private synchronized void uploadCurrentBlock ( ) throws IOException { if ( ! hasActiveBlock ( ) ) { throw new IllegalStateException ( "No active block" ) ; } LOG . debug ( "Writing block # {}" , blockCount ) ; if ( multiPartUpload == null ) { LOG . debug ( "Initiating Multipart upload" ) ; multiPartUpload = new MultiPartUpload ( ) ; } try { multiPartUpload . uploadBlockAsync ( getActiveBlock ( ) ) ; } finally { clearActiveBlock ( ) ; } }
|
Starts an asynchronous upload of the current block, initiating the multipart upload on first use.
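A hedged sketch of the write path feeding this method, assuming the S3A-style DataBlock API in which write() returns the number of bytes accepted and remainingCapacity() reports free space:

// Inside the enclosing output stream's write(byte[], int, int):
COSDataBlocks.DataBlock block = createBlockIfNeeded();
int written = block.write(buffer, offset, length);
if (block.remainingCapacity() == 0) {
    uploadCurrentBlock(); // full block: queue it on the multipart upload
}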
|
1,292
|
private void putObject ( ) throws IOException { LOG . debug ( "Executing regular upload for {}" , writeOperationHelper ) ; final COSDataBlocks . DataBlock block = getActiveBlock ( ) ; int size = block . dataSize ( ) ; final COSDataBlocks . BlockUploadData uploadData = block . startUpload ( ) ; final PutObjectRequest putObjectRequest = uploadData . hasFile ( ) ? writeOperationHelper . newPutRequest ( uploadData . getFile ( ) ) : writeOperationHelper . newPutRequest ( uploadData . getUploadStream ( ) , size ) ; final ObjectMetadata om = new ObjectMetadata ( ) ; om . setUserMetadata ( mMetadata ) ; if ( contentType != null && ! contentType . isEmpty ( ) ) { om . setContentType ( contentType ) ; } else { om . setContentType ( "application/octet-stream" ) ; } putObjectRequest . setMetadata ( om ) ; ListenableFuture < PutObjectResult > putObjectResult = executorService . submit ( new Callable < PutObjectResult > ( ) { public PutObjectResult call ( ) throws Exception { PutObjectResult result ; try { result = writeOperationHelper . putObject ( putObjectRequest ) ; } finally { closeAll ( LOG , uploadData , block ) ; } return result ; } } ) ; clearActiveBlock ( ) ; try { putObjectResult . get ( ) ; } catch ( InterruptedException ie ) { LOG . warn ( "Interrupted object upload" , ie ) ; Thread . currentThread ( ) . interrupt ( ) ; } catch ( ExecutionException ee ) { throw extractException ( "regular upload" , key , ee ) ; } }
|
Uploads the current block as a single PUT request; if the buffer is empty, a 0-byte PUT is issued, since it is needed to create an entry on the remote store.
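A minimal sketch, assuming close() decides between the two upload paths; the complete() call on MultiPartUpload is a hypothetical name:

// At stream close: if no multipart upload was started, all buffered data
// (possibly none) goes out as one PUT, creating the entry on the far end.
if (multiPartUpload == null) {
    putObject(); // degenerates to a 0-byte PUT for an empty buffer
} else {
    uploadCurrentBlock();       // flush the tail block
    multiPartUpload.complete(); // hypothetical completion step
}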
|
1,293
|
public FSDataOutputStream createObject ( String objName , String contentType , Map < String , String > metadata , Statistics statistics ) throws IOException { final URL url = new URL ( mJossAccount . getAccessURL ( ) + "/" + getURLEncodedObjName ( objName ) ) ; LOG . debug ( "PUT {}. Content-Type : {}" , url . toString ( ) , contentType ) ; String cachedName = getObjName ( container + "/" , objName ) ; objectCache . remove ( cachedName ) ; try { final OutputStream sos ; if ( nonStreamingUpload ) { sos = new SwiftNoStreamingOutputStream ( mJossAccount , url , contentType , metadata , swiftConnectionManager , this ) ; } else { sos = new SwiftOutputStream ( mJossAccount , url , contentType , metadata , swiftConnectionManager ) ; } return new FSDataOutputStream ( sos , statistics ) ; } catch ( IOException e ) { LOG . error ( e . getMessage ( ) ) ; throw e ; } }
|
Creates an object via a direct HTTP PUT request, without the JOSS package.
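A hypothetical usage sketch; the swiftApi receiver, object name, and statistics object are assumptions:

import java.nio.charset.StandardCharsets;
import java.util.Collections;
import org.apache.hadoop.fs.FSDataOutputStream;

byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
FSDataOutputStream out = swiftApi.createObject(
    "data/part-00000", "application/octet-stream",
    Collections.<String, String>emptyMap(), statistics);
out.write(payload);
out.close(); // the PUT completes when the stream is closed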
|
1,294
|
private void setCorrectSize ( StoredObject tmp , Container cObj ) { long objectSize = tmp . getContentLength ( ) ; if ( objectSize == 0 ) { StoredObject soDirect = cObj . getObject ( tmp . getName ( ) ) ; long contentLength = soDirect . getContentLength ( ) ; if ( contentLength > 0 ) { tmp . setContentLength ( contentLength ) ; } } }
|
Swift has a bug where a container listing may wrongly report size 0 for large objects. This seems to be a well-known issue in Swift without a fix, so we provide a workaround: if the container listing reports size 0 for an object, we send an additional HEAD request on that object to verify its size.
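A minimal sketch of applying the workaround while building a listing; the loop and result list are assumptions rather than the project's actual listing code:

for (StoredObject summary : cObj.list()) {
    setCorrectSize(summary, cObj); // HEAD fallback when the listing reports size 0
    result.add(createFileStatus(summary, cObj, hostName, path));
}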
|
1,295
|
private FileStatus createFileStatus ( StoredObject tmp , Container cObj , String hostName , Path path ) throws IllegalArgumentException , IOException { String newMergedPath = getMergedPath ( hostName , path , tmp . getName ( ) ) ; return new FileStatus ( tmp . getContentLength ( ) , false , 1 , blockSize , Utils . lastModifiedAsLong ( tmp . getLastModified ( ) ) , 0 , null , null , null , new Path ( newMergedPath ) ) ; }
|
Maps a JOSS StoredObject to a Hadoop FileStatus.
|
1,296
|
public static Properties initialize ( URI uri , Configuration conf , String scheme ) throws IOException { LOG . debug ( "COS driver: initialize start for {} " , uri . toString ( ) ) ; String host = Utils . getHost ( uri ) ; LOG . debug ( "extracted host name from {} is {}" , uri . toString ( ) , host ) ; String bucket = Utils . getContainerName ( host , false ) ; String service = null ; try { service = Utils . getServiceName ( host ) ; } catch ( IOException ex ) { LOG . warn ( "Failed to extract service from the host {}" , host ) ; throw new IOException ( ex ) ; } if ( service == null ) { service = "service" ; } LOG . debug ( "Initiaize for bucket: {}, service: {}" , bucket , service ) ; String [ ] altPrefix = new String [ ] { S3_A_SERVICE_PREFIX + service , S3_D_SERVICE_PREFIX + service } ; String prefix = COS_SERVICE_PREFIX + service ; LOG . debug ( "Filesystem {}, using conf keys for {}. Alternative list {}" , uri , prefix , Arrays . toString ( altPrefix ) ) ; Properties props = new Properties ( ) ; props . setProperty ( COS_BUCKET_PROPERTY , bucket ) ; Utils . updateProperty ( conf , prefix , altPrefix , ACCESS_KEY , props , ACCESS_KEY_COS_PROPERTY , false ) ; Utils . updateProperty ( conf , prefix , altPrefix , SECRET_KEY , props , SECRET_KEY_COS_PROPERTY , false ) ; Utils . updateProperty ( conf , prefix , altPrefix , ENDPOINT_URL , props , ENDPOINT_URL_COS_PROPERTY , false ) ; Utils . updateProperty ( conf , prefix , altPrefix , AUTO_BUCKET_CREATE , props , AUTO_BUCKET_CREATE_COS_PROPERTY , false ) ; Utils . updateProperty ( conf , prefix , altPrefix , V2_SIGNER_TYPE , props , V2_SIGNER_TYPE_COS_PROPERTY , false ) ; Utils . updateProperty ( conf , prefix , altPrefix , INPUT_POLICY , props , INPUT_POLICY_COS_PROPERTY , false ) ; Utils . updateProperty ( conf , prefix , altPrefix , BLOCK_SIZE , props , BLOCK_SIZE_COS_PROPERTY , false ) ; Utils . updateProperty ( conf , prefix , altPrefix , REGION , props , REGION_COS_PROPERTY , false ) ; LOG . debug ( "Initialize completed successfully for bucket {} service {}" , bucket , service ) ; return props ; }
|
Parses configuration properties from core-site.xml and initializes the COS configuration.
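A hedged configuration sketch, assuming COS_SERVICE_PREFIX expands to "fs.cos." and the suffix constants to the dotted names shown in the comments; the service name, bucket, and values are illustrative:

import java.net.URI;
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;

Configuration conf = new Configuration();
conf.set("fs.cos.myservice.access.key", "ACCESS-KEY");   // ACCESS_KEY
conf.set("fs.cos.myservice.secret.key", "SECRET-KEY");   // SECRET_KEY
conf.set("fs.cos.myservice.endpoint", "s3.example.com"); // ENDPOINT_URL
Properties props = initialize(new URI("cos://mybucket.myservice/data"), conf, "cos");
// props now holds the bucket name plus the resolved per-service keys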
|
1,297
|
private HttpRequestRetryHandler getRetryHandler ( ) { final HttpRequestRetryHandler myRetryHandler = new HttpRequestRetryHandler ( ) { public boolean retryRequest ( IOException exception , int executionCount , HttpContext context ) { if ( executionCount >= connectionConfiguration . getExecutionCount ( ) ) { LOG . debug ( "Execution count {} is bigger than threshold. Stop" , executionCount ) ; return false ; } if ( exception instanceof NoHttpResponseException ) { LOG . debug ( "NoHttpResponseException exception. Retry count {}" , executionCount ) ; return true ; } if ( exception instanceof UnknownHostException ) { LOG . debug ( "UnknownHostException. Retry count {}" , executionCount ) ; return true ; } if ( exception instanceof ConnectTimeoutException ) { LOG . debug ( "ConnectTimeoutException. Retry count {}" , executionCount ) ; return true ; } if ( exception instanceof SocketTimeoutException || exception . getClass ( ) == SocketTimeoutException . class || exception . getClass ( ) . isInstance ( SocketTimeoutException . class ) ) { LOG . debug ( "socketTimeoutException Retry count {}" , executionCount ) ; return true ; } if ( exception instanceof InterruptedIOException ) { LOG . debug ( "InterruptedIOException Retry count {}" , executionCount ) ; return true ; } if ( exception instanceof SSLException ) { LOG . debug ( "SSLException Retry count {}" , executionCount ) ; return true ; } final HttpClientContext clientContext = HttpClientContext . adapt ( context ) ; final HttpRequest request = clientContext . getRequest ( ) ; boolean idempotent = ! ( request instanceof HttpEntityEnclosingRequest ) ; if ( idempotent ) { LOG . debug ( "HttpEntityEnclosingRequest. Retry count {}" , executionCount ) ; return true ; } LOG . debug ( "Retry stopped. Retry count {}" , executionCount ) ; return false ; } } ; return myRetryHandler ; }
|
Creates a custom retry handler that decides whether a request should be retried when an HTTP exception occurs.
|
1,298
|
public CloseableHttpClient createHttpConnection ( ) { LOG . trace ( "HTTP build new connection based on connection pool" ) ; return HttpClients . custom ( ) . setRetryHandler ( getRetryHandler ( ) ) . setConnectionManager ( connectionPool ) . setDefaultRequestConfig ( rConfig ) . setKeepAliveStrategy ( myStrategy ) . build ( ) ; }
|
Creates an HTTP client based on the connection pool, with the custom retry handler and keep-alive strategy.
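A hypothetical usage sketch issuing a HEAD request over the pooled client; the URL and the swiftConnectionManager receiver are assumptions:

import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpHead;
import org.apache.http.impl.client.CloseableHttpClient;

CloseableHttpClient client = swiftConnectionManager.createHttpConnection();
HttpHead head = new HttpHead("https://object.store/v1/account/container/object");
try (CloseableHttpResponse resp = client.execute(head)) {
    int status = resp.getStatusLine().getStatusCode();
    // transient failures are retried by the handler installed above
}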
|
1,299
|
public void setGraphvizCommand ( String cmd ) { if ( cmd != null && cmd . length ( ) == 0 ) cmd = null ; graphvizCommand = cmd ; }
|
Sets the Graphviz command that is issued to render a debugging diagram; an empty command string is treated as null.
|