idx
int64 0
41.2k
| question
stringlengths 74
4.04k
| target
stringlengths 7
750
|
|---|---|---|
36,900
|
/**
 * Generates a prediction for a single text string.
 *
 * The text is cleaned, converted to features with the configured text
 * extractor, wrapped into a one-record dataframe and predicted in place.
 *
 * @param text the text to classify
 * @return the Record holding the observation data, predicted class and probabilities
 */
public Record predict(String text) {
    TrainingParameters params = (TrainingParameters) knowledgeBase.getTrainingParameters();

    // Build a throw-away dataframe containing just this one observation.
    Dataframe singleRecordDataset = new Dataframe(knowledgeBase.getConfiguration());
    AssociativeArray features = new AssociativeArray(
            AbstractTextExtractor.newInstance(params.getTextExtractorParameters())
                    .extract(StringCleaner.clear(text)));
    singleRecordDataset.add(new Record(features, null));

    predict(singleRecordDataset);

    Record prediction = singleRecordDataset.iterator().next();
    singleRecordDataset.close();
    return prediction;
}
|
It generates a prediction for a particular string. It returns a Record object which contains the observation data, the predicted class and the class probabilities.
|
36,901
|
/**
 * Validates the modeler on the provided dataset and returns the metrics.
 *
 * The dataset must already contain the real target values; predictions are
 * generated in place before scoring.
 *
 * @param testDataset the dataset with real target variables
 * @return the ClassificationMetrics of the predictions
 */
public ClassificationMetrics validate(Dataframe testDataset) {
    logger.info("validate()");
    // Predict in place, then score against the stored targets.
    predict(testDataset);
    return new ClassificationMetrics(testDataset);
}
|
It validates the modeler using the provided dataset and it returns the ClassificationMetrics . The testDataset should contain the real target variables .
|
36,902
|
/**
 * Validates the modeler using text files on disk.
 *
 * @param datasets map from class name to the URI of that class' text file
 *                 (one example per row)
 * @return the ClassificationMetrics of the predictions
 */
public ClassificationMetrics validate(Map<Object, URI> datasets) {
    TrainingParameters params = (TrainingParameters) knowledgeBase.getTrainingParameters();
    // Parse all files into a single labelled dataframe.
    Dataframe parsedDataset = Dataframe.Builder.parseTextFiles(
            datasets,
            AbstractTextExtractor.newInstance(params.getTextExtractorParameters()),
            knowledgeBase.getConfiguration());
    ClassificationMetrics metrics = validate(parsedDataset);
    parsedDataset.close();
    return metrics;
}
|
It validates the modeler using the provided dataset files . The data map should have as index the names of each class and as values the URIs of the training files . The data files should contain one example per row .
|
36,903
|
/**
 * Generates a name for the KnowledgeBase of this algorithm:
 * storageName + separator + the simple class name.
 *
 * @param storageName the base storage name
 * @param separator   the separator between storage name and class name
 * @return the generated KnowledgeBase name
 */
protected final String createKnowledgeBaseName(String storageName, String separator) {
    String algorithmName = getClass().getSimpleName();
    return storageName + separator + algorithmName;
}
|
Generates a name for the KnowledgeBase .
|
36,904
|
/**
 * Returns every possible combination (subset) of the given set that has
 * exactly {@code subsetSize} elements, materialized into a set of sets.
 *
 * @param elements   the source set
 * @param subsetSize the size of each combination
 * @return all subsetSize-combinations of the set
 */
public static <T> Set<Set<T>> combinations(Set<T> elements, int subsetSize) {
    Stream<Set<T>> allCombinations = combinationsStream(elements, subsetSize);
    return allCombinations.collect(Collectors.toCollection(HashSet::new));
}
|
Returns all the possible combinations of the set .
|
36,905
|
/**
 * Returns all possible combinations (subsets) of size {@code subsetSize} as a stream.
 *
 * Bug fix: the previous implementation removed an element from the caller's set
 * and re-added it afterwards. That temporarily mutated the input mid-computation
 * and threw UnsupportedOperationException for unmodifiable sets. The method now
 * recurses on a private copy and never touches the caller's set.
 *
 * @param elements   the source set (never modified)
 * @param subsetSize the size of each combination; 0 yields a single empty set
 * @return a stream of mutable HashSet combinations
 */
public static <T> Stream<Set<T>> combinationsStream(Set<T> elements, int subsetSize) {
    if (subsetSize == 0) {
        return Stream.of(new HashSet<>());
    }
    else if (subsetSize <= elements.size()) {
        // Work on a copy so the caller's set is never mutated.
        Set<T> remaining = new HashSet<>(elements);
        Iterator<T> it = remaining.iterator();
        T picked = it.next();
        it.remove();
        // A combination either excludes 'picked' entirely, or contains it
        // together with a (subsetSize-1)-combination of the rest.
        return Stream.concat(
                combinationsStream(remaining, subsetSize),
                combinationsStream(remaining, subsetSize - 1).map(s -> {
                    s.add(picked);
                    return s;
                }));
    }
    else {
        return Stream.empty();
    }
}
|
Returns all the possible combinations of the set in a stream .
|
36,906
|
/**
 * Fast and memory-efficient iterator over all subsetSize-combinations of an array.
 * Only one int[] of selected indexes is kept; each next() materializes a fresh array.
 *
 * NOTE(review): next() must only be called after hasNext() returned true — it does
 * not re-check and does not throw NoSuchElementException on exhaustion.
 * NOTE(review): elements[0].getClass() means an empty input array throws
 * ArrayIndexOutOfBoundsException on the first next() — confirm callers guard this.
 *
 * @param elements   the array whose combinations are enumerated
 * @param subsetSize the size of each combination
 * @return an iterator producing one T[] per combination
 */
public static <T> Iterator<T[]> combinationsIterator(final T[] elements, final int subsetSize) {
    return new Iterator<T[]>() {
        // Depth of the partial combination currently being built (0..subsetSize-1).
        private int r = 0;
        // Next candidate position in 'elements' to try at depth r.
        private int index = 0;
        // selectedIndexes[0..r] holds the element indexes of the current combination.
        private final int[] selectedIndexes = new int[subsetSize];
        // Tri-state cache: null = not yet computed; filled lazily by hasNext().
        private Boolean hasNext = null;

        public boolean hasNext() {
            if (hasNext == null) {
                hasNext = locateNext();
            }
            return hasNext;
        }

        public T[] next() {
            hasNext = null;
            // Reflectively allocate a T[] using the runtime type of the first element.
            @SuppressWarnings("unchecked")
            T[] combination = (T[]) Array.newInstance(elements[0].getClass(), subsetSize);
            for (int i = 0; i < subsetSize; i++) {
                combination[i] = elements[selectedIndexes[i]];
            }
            return combination;
        }

        // Advances the (r, index) state machine to the next valid combination;
        // backtracks when no further index fits at the current depth.
        private boolean locateNext() {
            if (subsetSize == 0) {
                return false;
            }
            int N = elements.length;
            while (true) {
                if (index <= (N + (r - subsetSize))) {
                    selectedIndexes[r] = index++;
                    if (r == subsetSize - 1) {
                        return true;
                    }
                    else {
                        r++;
                    }
                }
                else {
                    // Exhausted this depth: backtrack one level.
                    r--;
                    if (r < 0) {
                        return false;
                    }
                    index = selectedIndexes[r] + 1;
                }
            }
        }
    };
}
|
Fast and memory efficient way to return an iterator with all the possible combinations of an array .
|
36,907
|
/**
 * Converts a Spliterator into a Stream.
 *
 * @param spliterator the source spliterator
 * @param parallel    whether the resulting stream should be parallel
 * @return the resulting stream
 */
public static <T> Stream<T> stream(Spliterator<T> spliterator, boolean parallel) {
    Stream<T> result = StreamSupport.stream(spliterator, parallel);
    return result;
}
|
Converts a Spliterator to a Stream.
|
36,908
|
/**
 * Switches a Stream to parallel or sequential mode.
 *
 * @param stream   the stream to convert
 * @param parallel true for a parallel stream, false for a sequential one
 * @return the same stream in the requested mode
 */
public static <T> Stream<T> stream(Stream<T> stream, boolean parallel) {
    return parallel ? stream.parallel() : stream.sequential();
}
|
Converts a Stream to parallel or sequential mode.
|
36,909
|
/**
 * Finds the method (of any visibility: public, protected, package-private or
 * private) of the object's class hierarchy with the provided name whose declared
 * parameter types can accept the runtime classes of the provided arguments.
 *
 * NOTE(review): params[i].getClass() throws NullPointerException if any sample
 * argument is null, and methods with primitive parameters (int, double, ...)
 * never match because the arguments arrive boxed — confirm callers only resolve
 * reference-typed overloads.
 *
 * @param obj        the object whose class hierarchy is searched
 * @param methodName the name of the method to locate
 * @param params     sample argument values used to resolve the overload
 * @return the matching Method
 * @throws RuntimeException wrapping NoSuchMethodException when nothing matches
 */
public static Method findMethod(Object obj, String methodName, Object... params) {
    // Resolve the runtime class of every sample argument.
    Class<?>[] classArray = new Class<?>[params.length];
    for (int i = 0; i < params.length; i++) {
        classArray[i] = params[i].getClass();
    }
    try {
        // Walk up the hierarchy until a compatible declared method is found.
        Class<?> klass = obj.getClass();
        while (klass != null) {
            for (Method method : klass.getDeclaredMethods()) {
                if (method.getName().equals(methodName) && method.getParameterCount() == classArray.length) {
                    Class<?>[] paramClasses = method.getParameterTypes();
                    boolean parametersMatch = true;
                    for (int i = 0; i < params.length; i++) {
                        // Each declared parameter must accept the argument's runtime class.
                        if (!paramClasses[i].isAssignableFrom(classArray[i])) {
                            parametersMatch = false;
                            break;
                        }
                    }
                    if (parametersMatch) {
                        return method;
                    }
                }
            }
            klass = klass.getSuperclass();
        }
        throw new NoSuchMethodException();
    }
    catch (NoSuchMethodException ex) {
        throw new RuntimeException(ex);
    }
}
|
Finds the public protected default or private method of the object with the provided name and parameters .
|
36,910
|
/**
 * Extracts keyword combinations (n-grams) from the text together with a score
 * that combines occurrence counts and proximity information.
 *
 * @param text the input text
 * @return map from extracted keyword phrase to its accumulated score
 */
public Map<String, Double> extract(final String text) {
    // word id -> word string
    Map<Integer, String> ID2word = new HashMap<>();
    // word id -> number of occurrences (stored as double)
    Map<Integer, Double> ID2occurrences = new HashMap<>();
    // token position in the document -> word id (insertion-ordered)
    Map<Integer, Integer> position2ID = new LinkedHashMap<>();
    int numberOfWordsInDoc = buildInternalArrays(text, ID2word, ID2occurrences, position2ID);
    int maxCombinations = parameters.getMaxCombinations();
    Map<String, Double> keywordsMap = new HashMap<>();
    for (Map.Entry<Integer, Integer> entry : position2ID.entrySet()) {
        Integer wordID = entry.getValue();
        if (!useThisWord(wordID, ID2word, ID2occurrences)) {
            continue;
        }
        Integer position = entry.getKey();
        // Candidate position combinations within the examination window, scored.
        Map<LinkedList<Integer>, Double> positionCombinationsWithScores = getPositionCombinationsWithinWindow(position, maxCombinations, ID2word, ID2occurrences, position2ID, numberOfWordsInDoc);
        for (Map.Entry<LinkedList<Integer>, Double> entry2 : positionCombinationsWithScores.entrySet()) {
            LinkedList<Integer> positionCombination = entry2.getKey();
            // Rebuild the keyword phrase from the word found at each position.
            StringBuilder sb = new StringBuilder(positionCombination.size() * 6);
            for (Integer pos : positionCombination) {
                sb.append(ID2word.get(position2ID.get(pos))).append(" ");
            }
            if (sb.length() > 0) {
                String key = sb.toString().trim();
                double score = entry2.getValue();
                // Accumulate scores of the same phrase found at different positions.
                keywordsMap.put(key, keywordsMap.getOrDefault(key, 0.0) + score);
            }
        }
    }
    // NOTE(review): the minimum-occurrence parameter is compared against the
    // accumulated score rather than a raw occurrence count — confirm intended.
    double minScore = parameters.getMinWordOccurrence();
    Iterator<Map.Entry<String, Double>> it = keywordsMap.entrySet().iterator();
    while (it.hasNext()) {
        Map.Entry<String, Double> entry = it.next();
        if (entry.getValue() < minScore) {
            it.remove();
        }
    }
    return keywordsMap;
}
|
This method gets as input a string and returns as output a map with the extracted keywords along with their scores in the text. The scores are a combination of occurrence and proximity metrics.
|
36,911
|
/**
 * Samples n ids from a table that maps ids to weights (probabilities or frequencies),
 * using roulette-wheel selection.
 *
 * NOTE(review): when withReplacement is false and populationN <= n the loop exits
 * on its very first iteration, returning an EMPTY collection rather than the whole
 * population — confirm this is the intended behaviour.
 * NOTE(review): without replacement, a draw landing on an already-sampled id is
 * discarded, so fewer than n ids may be returned.
 *
 * @param weightedTable   id -> weight table
 * @param n               the number of ids to draw
 * @param withReplacement whether an id may be drawn more than once
 * @return the sampled ids
 */
public static FlatDataCollection weightedSampling(AssociativeArray weightedTable, int n, boolean withReplacement) {
    FlatDataList sampledIds = new FlatDataList();
    double sumOfFrequencies = Descriptives.sum(weightedTable.toFlatDataCollection());
    int populationN = weightedTable.size();
    for (int i = 0; i < n; ++i) {
        if (withReplacement == false && populationN <= n) {
            break;
        }
        // Draw a uniform value in [0, sumOfFrequencies] and pick the id whose
        // cumulative frequency first reaches it.
        double randomFrequency = PHPMethods.mt_rand(0.0, sumOfFrequencies);
        double cumulativeFrequency = 0;
        for (Map.Entry<Object, Object> entry : weightedTable.entrySet()) {
            Object pointID = entry.getKey();
            cumulativeFrequency += TypeInference.toDouble(entry.getValue());
            if (cumulativeFrequency >= randomFrequency) {
                if (withReplacement == false && sampledIds.contains(pointID)) {
                    continue;
                }
                sampledIds.add(pointID);
                break;
            }
        }
    }
    return sampledIds.toFlatDataCollection();
}
|
Samples n ids based on a table which contains their weights, probabilities or frequencies.
|
36,912
|
/**
 * Calculates the variance of the sample mean (Xbar) for a finite population.
 *
 * @param variance    the population variance
 * @param sampleN     the sample size
 * @param populationN the population size
 * @return the variance of Xbar
 * @throws IllegalArgumentException if any size is non-positive or sampleN > populationN
 */
public static double xbarVariance(double variance, int sampleN, int populationN) {
    if (populationN <= 0 || sampleN <= 0 || sampleN > populationN) {
        throw new IllegalArgumentException("All the parameters must be positive and sampleN smaller than populationN.");
    }
    // Finite population correction (1 - n/N) applied to variance/n.
    double correction = 1.0 - (double) sampleN / populationN;
    return correction * variance / sampleN;
}
|
Calculates Variance for Xbar for a finite population size
|
36,913
|
/**
 * Calculates the standard deviation of the sample mean (Xbar) for an infinite
 * population, approximated by Integer.MAX_VALUE in the finite-population formula.
 *
 * @param std     the population standard deviation
 * @param sampleN the sample size
 * @return the standard deviation of Xbar
 */
public static double xbarStd(double std, int sampleN) {
    double variance = std * std;
    return Math.sqrt(xbarVariance(variance, sampleN, Integer.MAX_VALUE));
}
|
Calculates Standard Deviation for Xbar for infinite population size
|
36,914
|
/**
 * Calculates the standard deviation of the sample mean (Xbar) for a finite population.
 *
 * @param std         the population standard deviation
 * @param sampleN     the sample size
 * @param populationN the population size
 * @return the standard deviation of Xbar
 */
public static double xbarStd(double std, int sampleN, int populationN) {
    double variance = std * std;
    return Math.sqrt(xbarVariance(variance, sampleN, populationN));
}
|
Calculates Standard Deviation for Xbar for finite population size
|
36,915
|
/**
 * Calculates the variance of the sample proportion (Pbar) for a finite population.
 *
 * @param pbar        the sample proportion
 * @param sampleN     the sample size
 * @param populationN the population size
 * @return the variance of Pbar
 * @throws IllegalArgumentException if any size is non-positive or sampleN > populationN
 */
public static double pbarVariance(double pbar, int sampleN, int populationN) {
    if (populationN <= 0 || sampleN <= 0 || sampleN > populationN) {
        throw new IllegalArgumentException("All the parameters must be positive and sampleN smaller than populationN.");
    }
    // Sampling fraction n/N.
    double samplingFraction = (double) sampleN / populationN;
    double numerator = (1.0 - samplingFraction) * pbar * (1.0 - pbar);
    return numerator / (sampleN - 1.0);
}
|
Calculates Variance for Pbar for a finite population size
|
36,916
|
/**
 * Calculates the standard deviation of the sample proportion (Pbar) for an
 * infinite population, approximated by Integer.MAX_VALUE.
 *
 * @param pbar    the sample proportion
 * @param sampleN the sample size
 * @return the standard deviation of Pbar
 */
public static double pbarStd(double pbar, int sampleN) {
    final int infinitePopulationN = Integer.MAX_VALUE;
    return Math.sqrt(pbarVariance(pbar, sampleN, infinitePopulationN));
}
|
Calculates Standard Deviation for Pbar for infinite population size
|
36,917
|
/**
 * Calculates the standard deviation of the sample proportion (Pbar) for a
 * finite population.
 *
 * @param pbar        the sample proportion
 * @param sampleN     the sample size
 * @param populationN the population size
 * @return the standard deviation of Pbar
 */
public static double pbarStd(double pbar, int sampleN, int populationN) {
    double variance = pbarVariance(pbar, sampleN, populationN);
    return Math.sqrt(variance);
}
|
Calculates Standard Deviation for Pbar for finite population size
|
36,918
|
/**
 * Returns the minimum required sample size for a given maximum Xbar standard
 * error, for a finite population.
 *
 * @param maximumXbarStd the maximum acceptable standard error of Xbar
 * @param populationStd  the population standard deviation
 * @param populationN    the population size
 * @return the minimum sample size (rounded up)
 * @throws IllegalArgumentException if populationN is not positive
 */
public static int minimumSampleSizeForMaximumXbarStd(double maximumXbarStd, double populationStd, int populationN) {
    if (populationN <= 0) {
        throw new IllegalArgumentException("The populationN parameter must be positive.");
    }
    double requiredN = 1.0 / (Math.pow(maximumXbarStd / populationStd, 2) + 1.0 / populationN);
    return (int) Math.ceil(requiredN);
}
|
Returns the minimum required sample size when we set a specific maximum Xbar STD Error for finite population size .
|
36,919
|
/**
 * Returns the minimum required sample size for a predefined limit d and a
 * maximum probability risk a, for an infinite population (approximated by
 * Integer.MAX_VALUE in the finite-population formula).
 *
 * @param d             the predefined limit
 * @param aLevel        the maximum risk level
 * @param populationStd the population standard deviation
 * @return the minimum sample size
 */
public static int minimumSampleSizeForGivenDandMaximumRisk(double d, double aLevel, double populationStd) {
    final int infinitePopulationN = Integer.MAX_VALUE;
    return minimumSampleSizeForGivenDandMaximumRisk(d, aLevel, populationStd, infinitePopulationN);
}
|
Returns the minimum required sample size when we set a predefined limit d and a maximum probability risk a, for infinite population size.
|
36,920
|
/**
 * Returns the minimum required sample size for a predefined limit d and a
 * maximum probability risk a, for a finite population.
 *
 * @param d             the predefined limit
 * @param aLevel        the maximum risk level
 * @param populationStd the population standard deviation
 * @param populationN   the population size
 * @return the minimum sample size (rounded up)
 * @throws IllegalArgumentException if any parameter is non-positive
 */
public static int minimumSampleSizeForGivenDandMaximumRisk(double d, double aLevel, double populationStd, int populationN) {
    if (populationN <= 0 || aLevel <= 0 || d <= 0) {
        throw new IllegalArgumentException("All the parameters must be positive.");
    }
    // Two-sided z-score for the requested risk level.
    double confidence = 1.0 - aLevel / 2.0;
    double zScore = ContinuousDistributions.gaussInverseCdf(confidence);
    double V = Math.pow(d / zScore, 2);
    double populationVariance = populationStd * populationStd;
    double requiredN = (populationVariance / V) * (1.0 / (1.0 + populationVariance / (populationN * V)));
    return (int) Math.ceil(requiredN);
}
|
Returns the minimum required sample size when we set a predefined limit d and a maximum probability Risk a for finite population size
|
36,921
|
/**
 * Estimates the w-shingler (resemblance) similarity between two texts: the
 * Jaccard ratio of common w-word sequences over all distinct w-word sequences.
 *
 * NOTE(review): the return values of preprocessDocument(...) are discarded; since
 * Java Strings are immutable these calls cannot modify text1/text2 in place —
 * confirm whether the preprocessed results were meant to be used afterwards.
 *
 * @param text1 the first text
 * @param text2 the second text
 * @param w     the number of words in each sequence used for the estimation
 * @return the resemblance ratio in [0, 1]
 */
public static double shinglerSimilarity(String text1, String text2, int w) {
    preprocessDocument(text1);
    preprocessDocument(text2);
    // Extract w-grams from both documents with identical extractor settings.
    NgramsExtractor.Parameters parameters = new NgramsExtractor.Parameters();
    parameters.setMaxCombinations(w);
    parameters.setMaxDistanceBetweenKwds(0);
    parameters.setExaminationWindowLength(w);
    NgramsExtractor ngrams = new NgramsExtractor(parameters);
    Map<String, Double> keywords1 = ngrams.extract(text1);
    Map<String, Double> keywords2 = ngrams.extract(text2);
    // Presumably keeps only w-word combinations — confirm in filterKeywordCombinations.
    filterKeywordCombinations(keywords1, w);
    filterKeywordCombinations(keywords2, w);
    double totalKeywords = 0.0;
    double commonKeywords = 0.0;
    // |union| of the two keyword sets.
    Set<String> union = new HashSet<>(keywords1.keySet());
    union.addAll(keywords2.keySet());
    totalKeywords += union.size();
    // |intersection| of the two keyword sets.
    Set<String> intersect = new HashSet<>(keywords1.keySet());
    intersect.retainAll(keywords2.keySet());
    commonKeywords += intersect.size();
    double resemblance = commonKeywords / totalKeywords;
    return resemblance;
}
|
Estimates the w - shingler similarity between two texts . The w is the number of word sequences that are used for the estimation .
|
36,922
|
/**
 * Walks every field declared anywhere in this object's class hierarchy and
 * initializes the ones annotated with {@code @BigMap}.
 *
 * @param storageEngine the storage engine that backs the big maps
 */
private void bigMapInitializer(StorageEngine storageEngine) {
    for (Field candidateField : ReflectionMethods.getAllFields(new LinkedList<>(), this.getClass())) {
        boolean isBigMap = candidateField.isAnnotationPresent(BigMap.class);
        if (isBigMap) {
            initializeBigMapField(storageEngine, candidateField);
        }
    }
}
|
Initializes all the fields of the class which are marked with the BigMap annotation automatically .
|
36,923
|
/**
 * Initializes a single {@code @BigMap}-annotated field by asking the storage
 * engine for a map configured from the annotation's attributes.
 *
 * @param storageEngine the storage engine that backs the map
 * @param field         the annotated field to initialize
 */
private void initializeBigMapField(StorageEngine storageEngine, Field field) {
    field.setAccessible(true);
    try {
        BigMap annotation = field.getAnnotation(BigMap.class);
        Object bigMap = storageEngine.getBigMap(
                field.getName(),
                annotation.keyClass(),
                annotation.valueClass(),
                annotation.mapType(),
                annotation.storageHint(),
                annotation.concurrent(),
                false);
        field.set(this, bigMap);
    }
    catch (IllegalArgumentException | IllegalAccessException ex) {
        throw new RuntimeException(ex);
    }
}
|
Initializes a field which is marked as BigMap .
|
36,924
|
/**
 * Puts the trainable in the bundle under the provided key.
 *
 * @param key   the key to store the trainable under
 * @param value the trainable to store
 * @return the previous trainable stored under this key, or null if there was none
 */
public Trainable put(String key, Trainable value) {
    return bundle.put(key, value);
}
|
Puts the trainable in the bundle using a specific key and returns the previous entry or null .
|
36,925
|
/**
 * Propagates the parallelized flag to every wrapped algorithm that supports it.
 *
 * Cleanup: the previous explicit null check before instanceof was redundant —
 * {@code null instanceof Parallelizable} is already false.
 *
 * @param parallelized whether the wrapped algorithms should run in parallel
 */
public void setParallelized(boolean parallelized) {
    for (Trainable t : bundle.values()) {
        if (t instanceof Parallelizable) {
            ((Parallelizable) t).setParallelized(parallelized);
        }
    }
}
|
Updates the parallelized flag of all wrapped algorithms .
|
36,926
|
/**
 * Creates a new algorithm instance from the provided training parameters.
 *
 * The algorithm class is resolved as the class that encloses the concrete
 * TrainingParameters class, then its (TrainingParameters, Configuration)
 * constructor is invoked reflectively.
 *
 * @param trainingParameters the training parameters of the algorithm
 * @param configuration      the configuration to pass to the constructor
 * @return the new algorithm instance
 * @throws RuntimeException wrapping any reflective failure
 */
public static <T extends Trainable, TP extends Parameterizable> T create(TP trainingParameters, Configuration configuration) {
    try {
        Class<T> algorithmClass = (Class<T>) trainingParameters.getClass().getEnclosingClass();
        Constructor<T> ctor = algorithmClass.getDeclaredConstructor(trainingParameters.getClass(), Configuration.class);
        ctor.setAccessible(true);
        return ctor.newInstance(trainingParameters, configuration);
    }
    catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException | SecurityException ex) {
        throw new RuntimeException(ex);
    }
}
|
Creates a new algorithm based on the provided training parameters .
|
36,927
|
/**
 * Loads an algorithm from storage by invoking its (String, Configuration)
 * constructor reflectively.
 *
 * @param aClass        the algorithm class to instantiate
 * @param storageName   the name under which the algorithm was stored
 * @param configuration the configuration to pass to the constructor
 * @return the loaded algorithm instance
 * @throws RuntimeException wrapping any reflective failure
 */
public static <T extends Trainable> T load(Class<T> aClass, String storageName, Configuration configuration) {
    try {
        Constructor<T> ctor = aClass.getDeclaredConstructor(String.class, Configuration.class);
        ctor.setAccessible(true);
        return ctor.newInstance(storageName, configuration);
    }
    catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException | SecurityException ex) {
        throw new RuntimeException(ex);
    }
}
|
Loads an algorithm from the storage .
|
36,928
|
/**
 * Estimates the number of k-combinations of n objects: C(n, k) = n! / (k! (n-k)!).
 *
 * The multiplicative form i / (i - (n-k)) for i in (n-k, n] avoids computing
 * large factorials; the result is a double, so very large n lose precision.
 *
 * Robustness fix: a negative k previously slipped through the n &lt; k check and
 * silently returned 1.0; it now throws IllegalArgumentException.
 *
 * @param n the total number of objects, n &gt;= k
 * @param k the size of each combination, k &gt;= 0
 * @return the number of combinations as a double
 * @throws IllegalArgumentException if k is negative or n is smaller than k
 */
public static double combination(int n, int k) {
    if (k < 0) {
        throw new IllegalArgumentException("The k can't be negative.");
    }
    if (n < k) {
        throw new IllegalArgumentException("The n can't be smaller than k.");
    }
    double combinations = 1.0;
    double lowerBound = n - k;
    for (int i = n; i > lowerBound; i--) {
        combinations *= i / (i - lowerBound);
    }
    return combinations;
}
|
It estimates the number of k - combinations of n objects .
|
36,929
|
/**
 * Returns the StorageType that contains a map with the given name, assuming map
 * names are unique across all storage types.
 *
 * @param name the name of the map to look up
 * @return the StorageType that holds the map, or null if no open storage has it
 */
private StorageType getStorageTypeFromName(String name) {
    // Search every open storage for a map with this name.
    for (Map.Entry<StorageType, DB> registryEntry : storageRegistry.entrySet()) {
        DB candidate = registryEntry.getValue();
        if (isOpenStorage(candidate) && candidate.exists(name)) {
            return registryEntry.getKey();
        }
    }
    // Not found in any open storage.
    return null;
}
|
Returns the StorageType using the name of the map . It assumes that names are unique across all StorageType . If not found null is returned .
|
36,930
|
/**
 * Closes every storage engine in the registry that is still open and then
 * empties the registry.
 */
private void closeStorageRegistry() {
    for (DB openCandidate : storageRegistry.values()) {
        if (isOpenStorage(openCandidate)) {
            openCandidate.close();
        }
    }
    // Forget all registered storages, open or not.
    storageRegistry.clear();
}
|
It closes all the storage engines in the registry.
|
36,931
|
/**
 * Closes the provided storage and blocks until its innermost engine reports
 * closed (i.e. all changes have been written to disk). Intended for use before
 * moving the storage to a different location.
 *
 * Fix: the interrupt status is now restored (Thread.currentThread().interrupt())
 * before wrapping InterruptedException, so callers up the stack can still
 * observe the interruption.
 *
 * @param storageType the type of the storage to close
 * @return true if the storage was open and had to be closed, false otherwise
 */
private boolean blockedStorageClose(StorageType storageType) {
    DB storage = storageRegistry.get(storageType);
    if (isOpenStorage(storage)) {
        storage.commit();
        // Unwrap to the innermost engine; its isClosed() reflects the real state.
        Engine e = storage.getEngine();
        while (EngineWrapper.class.isAssignableFrom(e.getClass())) {
            e = ((EngineWrapper) e).getWrappedEngine();
        }
        storage.close();
        // Busy-wait (with short sleeps) until the engine has fully closed.
        while (!e.isClosed()) {
            logger.trace("Waiting for the engine to close");
            try {
                TimeUnit.MILLISECONDS.sleep(100);
            }
            catch (InterruptedException ex) {
                Thread.currentThread().interrupt(); // preserve interrupt status
                throw new RuntimeException(ex);
            }
        }
        return true;
    }
    else {
        return false;
    }
}
|
Closes the provided storage and waits until all changes are written to disk . It should be used when we move the storage to a different location . Returns true if the storage needed to be closed and false if it was not necessary .
|
36,932
|
/**
 * Separates the tokens of a string by splitting it on whitespace: runs of
 * Unicode separator (Z) and control (C) characters.
 *
 * @param text the text to tokenize
 * @return a mutable list of tokens
 */
public List<String> tokenize(String text) {
    String[] parts = text.split("[\\p{Z}\\p{C}]+");
    return new ArrayList<>(Arrays.asList(parts));
}
|
Separates the tokens of a string by splitting it on white space .
|
36,933
|
/**
 * Returns the best option and its payoff under the maxMin (pessimistic) strategy:
 * for each option keep the worst payoff over all events, then pick the option
 * whose worst case is best.
 *
 * @param payoffMatrix event -> (option -> payoff) matrix; must be rectangular
 * @return the selected option and its worst-case payoff
 * @throws IllegalArgumentException if the matrix is not rectangular
 */
public static Map.Entry<Object, Object> maxMin(DataTable2D payoffMatrix) {
    if (payoffMatrix.isValid() == false) {
        throw new IllegalArgumentException("The payoff matrix does not have a rectangular format.");
    }
    // For every option keep the minimum payoff observed over all events.
    AssociativeArray worstPayoffs = new AssociativeArray();
    for (Map.Entry<Object, AssociativeArray> eventEntry : payoffMatrix.entrySet()) {
        for (Map.Entry<Object, Object> optionEntry : eventEntry.getValue().entrySet()) {
            Object option = optionEntry.getKey();
            Double payoff = TypeInference.toDouble(optionEntry.getValue());
            Double worstSoFar = worstPayoffs.getDouble(option);
            if (worstSoFar == null || payoff < worstSoFar) {
                worstPayoffs.put(option, payoff);
            }
        }
    }
    // Pick the option whose worst case is the largest.
    return MapMethods.selectMaxKeyValue(worstPayoffs);
}
|
Returns the best option and the payoff under maxMin strategy
|
36,934
|
/**
 * Returns the best option and its payoff under the maxMax (optimistic) strategy:
 * the option holding the single largest payoff anywhere in the matrix.
 *
 * @param payoffMatrix event -> (option -> payoff) matrix; must be rectangular
 * @return the selected option and its payoff
 * @throws IllegalArgumentException if the matrix is not rectangular
 */
public static Map.Entry<Object, Object> maxMax(DataTable2D payoffMatrix) {
    if (payoffMatrix.isValid() == false) {
        throw new IllegalArgumentException("The payoff matrix does not have a rectangular format.");
    }
    // Track the single largest payoff seen anywhere in the matrix.
    Object bestOption = null;
    Double bestPayoff = Double.NEGATIVE_INFINITY;
    for (Map.Entry<Object, AssociativeArray> eventEntry : payoffMatrix.entrySet()) {
        for (Map.Entry<Object, Object> optionEntry : eventEntry.getValue().entrySet()) {
            Double payoff = TypeInference.toDouble(optionEntry.getValue());
            if (payoff > bestPayoff) {
                bestPayoff = payoff;
                bestOption = optionEntry.getKey();
            }
        }
    }
    return new AbstractMap.SimpleEntry<>(bestOption, bestPayoff);
}
|
Returns the best option and the payoff under maxMax strategy
|
36,935
|
/**
 * Returns the best option and its payoff under the savage (minimax regret)
 * strategy: build a regret matrix (payoff minus the event's best payoff) and
 * apply maxMin on it.
 *
 * @param payoffMatrix event -> (option -> payoff) matrix; must be rectangular
 * @return the selected option and its regret value
 * @throws IllegalArgumentException if the matrix is not rectangular
 */
public static Map.Entry<Object, Object> savage(DataTable2D payoffMatrix) {
    if (payoffMatrix.isValid() == false) {
        throw new IllegalArgumentException("The payoff matrix does not have a rectangular format.");
    }
    // Regret = payoff - best payoff of the same event (always <= 0 here).
    DataTable2D regretMatrix = new DataTable2D();
    for (Map.Entry<Object, AssociativeArray> eventEntry : payoffMatrix.entrySet()) {
        Object event = eventEntry.getKey();
        AssociativeArray optionPayoffs = eventEntry.getValue();
        double bestForEvent = Descriptives.max(optionPayoffs.toFlatDataCollection());
        for (Map.Entry<Object, Object> optionEntry : optionPayoffs.entrySet()) {
            Double payoff = TypeInference.toDouble(optionEntry.getValue());
            regretMatrix.put2d(event, optionEntry.getKey(), payoff - bestForEvent);
        }
    }
    return maxMin(regretMatrix);
}
|
Returns the best option and the payoff under savage strategy
|
36,936
|
/**
 * Returns the best option and its payoff under the laplace strategy: average
 * payoff per option assuming all events are equally likely.
 *
 * @param payoffMatrix event -> (option -> payoff) matrix; must be rectangular
 * @return the selected option and its average payoff
 * @throws IllegalArgumentException if the matrix is not rectangular
 */
public static Map.Entry<Object, Object> laplace(DataTable2D payoffMatrix) {
    if (payoffMatrix.isValid() == false) {
        throw new IllegalArgumentException("The payoff matrix does not have a rectangular format.");
    }
    AssociativeArray averagePayoffs = new AssociativeArray();
    int numberOfEvents = payoffMatrix.size();
    for (Map.Entry<Object, AssociativeArray> eventEntry : payoffMatrix.entrySet()) {
        for (Map.Entry<Object, Object> optionEntry : eventEntry.getValue().entrySet()) {
            Object option = optionEntry.getKey();
            Double payoff = TypeInference.toDouble(optionEntry.getValue());
            Double runningSum = averagePayoffs.getDouble(option);
            if (runningSum == null) {
                runningSum = 0.0;
            }
            // Incrementally accumulate payoff / numberOfEvents per option.
            averagePayoffs.put(option, runningSum + payoff / numberOfEvents);
        }
    }
    return MapMethods.selectMaxKeyValue(averagePayoffs);
}
|
Returns the best option and the payoff under laplace strategy
|
36,937
|
/**
 * Returns the best option and its payoff under the hurwiczAlpha strategy:
 * blend each option's best payoff (weight alpha) with its worst payoff
 * (weight 1 - alpha), then pick the maximum.
 *
 * @param payoffMatrix event -> (option -> payoff) matrix; must be rectangular
 * @param alpha        the optimism coefficient
 * @return the selected option and its blended payoff
 * @throws IllegalArgumentException if the matrix is not rectangular
 */
public static Map.Entry<Object, Object> hurwiczAlpha(DataTable2D payoffMatrix, double alpha) {
    if (payoffMatrix.isValid() == false) {
        throw new IllegalArgumentException("The payoff matrix does not have a rectangular format.");
    }
    // Per option, track both the worst and the best payoff over all events.
    AssociativeArray worstPayoffs = new AssociativeArray();
    AssociativeArray bestPayoffs = new AssociativeArray();
    for (Map.Entry<Object, AssociativeArray> eventEntry : payoffMatrix.entrySet()) {
        for (Map.Entry<Object, Object> optionEntry : eventEntry.getValue().entrySet()) {
            Object option = optionEntry.getKey();
            Double payoff = TypeInference.toDouble(optionEntry.getValue());
            Double worstSoFar = worstPayoffs.getDouble(option);
            if (worstSoFar == null || payoff < worstSoFar) {
                worstPayoffs.put(option, payoff);
            }
            Double bestSoFar = bestPayoffs.getDouble(option);
            if (bestSoFar == null || payoff > bestSoFar) {
                bestPayoffs.put(option, payoff);
            }
        }
    }
    // Blend: alpha * best + (1 - alpha) * worst.
    AssociativeArray blendedPayoffs = new AssociativeArray();
    for (Map.Entry<Object, Object> bestEntry : bestPayoffs.entrySet()) {
        Object option = bestEntry.getKey();
        double blended = TypeInference.toDouble(bestEntry.getValue()) * alpha + worstPayoffs.getDouble(option) * (1.0 - alpha);
        blendedPayoffs.put(option, blended);
    }
    return MapMethods.selectMaxKeyValue(blendedPayoffs);
}
|
Returns the best option and the payoff under hurwiczAlpha strategy
|
36,938
|
/**
 * Returns the best option and its payoff under the maximumLikelihood strategy:
 * only the most probable event is considered and its best option is chosen.
 *
 * @param payoffMatrix       event -> (option -> payoff) matrix; must be rectangular
 * @param eventProbabilities event -> probability
 * @return the best option and payoff for the most probable event
 * @throws IllegalArgumentException if the matrix is not rectangular
 */
public static Map.Entry<Object, Object> maximumLikelihood(DataTable2D payoffMatrix, AssociativeArray eventProbabilities) {
    if (payoffMatrix.isValid() == false) {
        throw new IllegalArgumentException("The payoff matrix does not have a rectangular format.");
    }
    Object mostProbableEvent = MapMethods.selectMaxKeyValue(eventProbabilities).getKey();
    return MapMethods.selectMaxKeyValue(payoffMatrix.get(mostProbableEvent));
}
|
Returns the best option and the payoff under maximumLikelihood strategy
|
36,939
|
/**
 * Returns the best option and its payoff under the bayes (expected value)
 * strategy: per option, sum payoff * P(event) over all events and pick the
 * maximum expectation.
 *
 * @param payoffMatrix       event -> (option -> payoff) matrix; must be rectangular
 * @param eventProbabilities event -> probability
 * @return the selected option and its expected payoff
 * @throws IllegalArgumentException if the matrix is not rectangular
 */
public static Map.Entry<Object, Object> bayes(DataTable2D payoffMatrix, AssociativeArray eventProbabilities) {
    if (payoffMatrix.isValid() == false) {
        throw new IllegalArgumentException("The payoff matrix does not have a rectangular format.");
    }
    AssociativeArray expectedPayoffs = new AssociativeArray();
    for (Map.Entry<Object, AssociativeArray> eventEntry : payoffMatrix.entrySet()) {
        Object event = eventEntry.getKey();
        for (Map.Entry<Object, Object> optionEntry : eventEntry.getValue().entrySet()) {
            Object option = optionEntry.getKey();
            Double payoff = TypeInference.toDouble(optionEntry.getValue());
            Double runningSum = expectedPayoffs.getDouble(option);
            if (runningSum == null) {
                runningSum = 0.0;
            }
            expectedPayoffs.put(option, runningSum + payoff * eventProbabilities.getDouble(event));
        }
    }
    return MapMethods.selectMaxKeyValue(expectedPayoffs);
}
|
Returns the best option and the payoff under bayes strategy
|
36,940
|
/**
 * Calculates a symmetric bivariate matrix (covariance or a correlation variant)
 * over all non-categorical X columns of the dataset.
 *
 * @param dataSet the dataframe whose X columns are analysed
 * @param type    which bivariate statistic to compute
 * @return a DataTable2D with one row/column per non-categorical variable
 */
private static DataTable2D bivariateMatrix(Dataframe dataSet, BivariateType type) {
    DataTable2D bivariateMatrix = new DataTable2D();
    Map<Object, TypeInference.DataType> columnTypes = dataSet.getXDataTypes();
    Object[] allVariables = columnTypes.keySet().toArray();
    int numberOfVariables = allVariables.length;
    TransposeDataList transposeDataList;
    for (int i = 0; i < numberOfVariables; ++i) {
        Object variable0 = allVariables[i];
        // Categorical columns are skipped entirely.
        if (columnTypes.get(variable0) == TypeInference.DataType.CATEGORICAL) {
            continue;
        }
        transposeDataList = new TransposeDataList();
        transposeDataList.put(0, dataSet.getXColumn(variable0));
        // Compute the upper triangle only (j starts at i); mirrored below.
        for (int j = i; j < numberOfVariables; ++j) {
            Object variable1 = allVariables[j];
            if (columnTypes.get(variable1) == TypeInference.DataType.CATEGORICAL) {
                continue;
            }
            transposeDataList.put(1, dataSet.getXColumn(variable1));
            double value = 0.0;
            if (type == BivariateType.COVARIANCE) {
                value = Descriptives.covariance(transposeDataList, true);
            }
            else if (type == BivariateType.PEARSONCORRELATION) {
                // Correlation of a variable with itself is 1 by definition.
                if (variable0.equals(variable1)) {
                    value = 1.0;
                }
                else {
                    value = PearsonCorrelation.calculateCorrelation(transposeDataList);
                }
            }
            else if (type == BivariateType.SPEARMANCORRELATION) {
                if (variable0.equals(variable1)) {
                    value = 1.0;
                }
                else {
                    value = SpearmanCorrelation.calculateCorrelation(transposeDataList);
                }
            }
            else if (type == BivariateType.KENDALLTAUCORRELATION) {
                if (variable0.equals(variable1)) {
                    value = 1.0;
                }
                else {
                    value = KendallTauCorrelation.calculateCorrelation(transposeDataList);
                }
            }
            bivariateMatrix.put2d(variable0, variable1, value);
            // Mirror into the lower triangle; the statistic is symmetric.
            if (!variable0.equals(variable1)) {
                bivariateMatrix.put2d(variable1, variable0, value);
            }
        }
    }
    return bivariateMatrix;
}
|
Calculates BivariateMatrix for a given statistic
|
36,941
|
/**
 * Updates the weights by applying ElasticNet regularization: the L2 update is
 * applied first, followed by the L1 update.
 *
 * NOTE(review): the order of the two delegations matters if the updates are not
 * commutative — confirm this matches the intended proximal update.
 *
 * @param l1           the L1 regularization coefficient
 * @param l2           the L2 regularization coefficient
 * @param learningRate the learning rate
 * @param weights      the current weights
 * @param newWeights   the weights to update in place
 */
public static <K> void updateWeights(double l1, double l2, double learningRate, Map<K, Double> weights, Map<K, Double> newWeights) {
    L2Regularizer.updateWeights(l2, learningRate, weights, newWeights);
    L1Regularizer.updateWeights(l1, learningRate, weights, newWeights);
}
|
Updates the weights by applying the ElasticNet regularization .
|
36,942
|
/**
 * Estimates the ElasticNet penalty as the sum of the L2 and L1 penalties.
 *
 * @param l1      the L1 regularization coefficient
 * @param l2      the L2 regularization coefficient
 * @param weights the weights to penalize
 * @return the combined penalty
 */
public static <K> double estimatePenalty(double l1, double l2, Map<K, Double> weights) {
    return L2Regularizer.estimatePenalty(l2, weights) + L1Regularizer.estimatePenalty(l1, weights);
}
|
Estimates the penalty by adding the ElasticNet regularization .
|
36,943
|
/**
 * Counts the number of occurrences of a substring within a string.
 *
 * Matches are counted at every starting offset, so overlapping occurrences are
 * all counted (e.g. "aa" occurs twice in "aaa"). Note this differs from PHP's
 * substr_count, which counts non-overlapping occurrences only.
 *
 * Bug fix: an empty search string previously caused an infinite loop, because
 * String.indexOf("", idx) clamps to the string length and never returns -1.
 * An empty substring now yields 0. The single-character fast path that
 * delegated to the char overload was removed; the general loop handles that
 * case identically, making this method self-contained.
 *
 * @param string    the string to search in
 * @param substring the string to search for
 * @return the number of (possibly overlapping) occurrences
 */
public static int substr_count(final String string, final String substring) {
    if (substring.isEmpty()) {
        return 0; // avoid infinite loop: indexOf("", i) never returns -1
    }
    int count = 0;
    int idx = 0;
    while ((idx = string.indexOf(substring, idx)) != -1) {
        ++idx;
        ++count;
    }
    return count;
}
|
Count the number of substring occurrences .
|
36,944
|
/**
 * Counts how many times a character appears in the string.
 *
 * @param string    the string to search in
 * @param character the character to count
 * @return the number of occurrences
 */
public static int substr_count(final String string, final char character) {
    // Stream over the UTF-16 code units and count exact matches.
    return (int) string.chars().filter(c -> c == character).count();
}
|
Count the number of times a character appears in the string .
|
36,945
|
/**
 * Matches the subject against a regex and replaces every match with the
 * provided replacement.
 *
 * @param regex       the regular expression to compile
 * @param replacement the replacement text
 * @param subject     the string to process
 * @return the subject with all matches replaced
 */
public static String preg_replace(String regex, String replacement, String subject) {
    // Compile once and delegate to the Pattern-based overload.
    return preg_replace(Pattern.compile(regex), replacement, subject);
}
|
Matches a string with a regex and replaces the matched components with a provided string .
|
36,946
|
/**
 * Matches the subject against a compiled pattern and replaces every match with
 * the provided replacement.
 *
 * NOTE: appendReplacement treats '$' and '\' in the replacement specially
 * (group references / escapes), exactly like the original implementation.
 *
 * @param pattern     the compiled pattern
 * @param replacement the replacement text
 * @param subject     the string to process
 * @return the subject with all matches replaced
 */
public static String preg_replace(Pattern pattern, String replacement, String subject) {
    StringBuffer rewritten = new StringBuffer(subject.length());
    Matcher matcher = pattern.matcher(subject);
    while (matcher.find()) {
        matcher.appendReplacement(rewritten, replacement);
    }
    matcher.appendTail(rewritten);
    return rewritten.toString();
}
|
Matches a string with a pattern and replaces the matched components with a provided string .
|
36,947
|
/**
 * Counts how many times the regex matches within the subject.
 *
 * @param regex   the regular expression to compile
 * @param subject the string to search
 * @return the number of matches
 */
public static int preg_match(String regex, String subject) {
    Pattern compiled = Pattern.compile(regex);
    return preg_match(compiled, subject);
}
|
Matches a string with a regex .
|
36,948
|
/**
 * Counts how many times the compiled pattern matches within the subject.
 *
 * @param pattern the compiled pattern
 * @param subject the string to search
 * @return the number of matches
 */
public static int preg_match(Pattern pattern, String subject) {
    int matches = 0;
    for (Matcher m = pattern.matcher(subject); m.find(); ) {
        ++matches;
    }
    return matches;
}
|
Matches a string with a pattern .
|
36,949
|
/**
 * Rounds a number to the given number of decimal places (half-up).
 *
 * @param d the value to round
 * @param i the number of decimal places
 * @return the rounded value
 */
public static double round(double d, int i) {
    // Scale, round half-up to a long, then scale back.
    double scale = Math.pow(10, i);
    return Math.round(d * scale) / scale;
}
|
Rounds a number to a specified precision .
|
36,950
|
/**
 * Returns the logarithm of a number for an arbitrary base via the
 * change-of-base formula: log_b(d) = ln(d) / ln(b).
 *
 * @param d    the value
 * @param base the logarithm base (positive and not 1)
 * @return the logarithm of d in the given base
 * @throws IllegalArgumentException if the base is 1 or non-positive
 */
public static double log(double d, double base) {
    if (base == 1.0 || base <= 0.0) {
        throw new IllegalArgumentException("Invalid base for logarithm.");
    }
    double numerator = Math.log(d);
    double denominator = Math.log(base);
    return numerator / denominator;
}
|
Returns the logarithm of a number at an arbitrary base .
|
36,951
|
/**
 * Flips the keys and values of a map. When several keys share the same value,
 * the last one iterated wins.
 *
 * @param map the map to invert
 * @return a new HashMap mapping values to keys
 */
public static <K, V> Map<V, K> array_flip(Map<K, V> map) {
    Map<V, K> inverted = new HashMap<>();
    for (Map.Entry<K, V> pair : map.entrySet()) {
        inverted.put(pair.getValue(), pair.getKey());
    }
    return inverted;
}
|
It flips the key and values of a map .
|
36,952
|
/**
 * Shuffles the array in place (Fisher–Yates) using the provided random generator:
 * walk from the last slot down, swapping each slot with a randomly chosen
 * earlier (or same) slot.
 *
 * @param array the array to shuffle in place
 * @param rnd   the random generator driving the shuffle
 */
public static <T> void shuffle(T[] array, Random rnd) {
    for (int i = array.length - 1; i > 0; --i) {
        int j = rnd.nextInt(i + 1);
        T swapped = array[i];
        array[i] = array[j];
        array[j] = swapped;
    }
}
|
Shuffles the values of any array in place using the provided random generator .
|
36,953
|
/**
 * Sorts an array in ascending order and returns an array with the indexes of
 * the original order, as produced by the private _asort helper with the
 * descending flag unset.
 *
 * NOTE(review): whether the input array itself is modified depends on _asort —
 * confirm before relying on it.
 *
 * @param array the array to sort
 * @return the indexes of the original order
 */
public static <T extends Comparable<T>> Integer[] asort(T[] array) {
    return _asort(array, false);
}
|
Sorts an array in ascending order and returns an array with indexes of the original order .
|
36,954
|
/**
 * Sorts an array in descending order and returns an array with the indexes of
 * the original order, as produced by the private _asort helper with the
 * descending flag set.
 *
 * NOTE(review): whether the input array itself is modified depends on _asort —
 * confirm before relying on it.
 *
 * @param array the array to sort
 * @return the indexes of the original order
 */
public static <T extends Comparable<T>> Integer[] arsort(T[] array) {
    return _asort(array, true);
}
|
Sorts an array in descending order and returns an array with indexes of the original order .
|
36,955
|
/**
 * Applies a sequence of swaps to the array: at every position i the element is
 * swapped with the element at indexes[i].
 *
 * NOTE(review): this is NOT equivalent to the out-of-place permutation
 * result[i] = original[indexes[i]], because earlier swaps move elements before
 * later iterations read them. It appears intended to replay the swap sequence
 * recorded by the asort/arsort helpers — confirm against those callers before
 * changing the algorithm.
 *
 * @throws IllegalArgumentException if the two arrays differ in length
 */
public static < T > void arrangeByIndex ( T [ ] array , Integer [ ] indexes ) { if ( array . length != indexes . length ) { throw new IllegalArgumentException ( "The length of the two arrays must match." ) ; } for ( int i = 0 ; i < array . length ; i ++ ) { int index = indexes [ i ] ; T tmp = array [ i ] ; array [ i ] = array [ index ] ; array [ index ] = tmp ; } }
|
Rearranges the array based on the order of the provided indexes .
|
36,956
|
/**
 * Returns an independent copy of the double array, or null for null input.
 *
 * @param a the array to copy (may be null)
 * @return a new array with the same contents, or null
 */
public static double[] array_clone(double[] a) {
    // Preserve null inputs; otherwise copy so callers cannot alias the original.
    return (a == null) ? null : Arrays.copyOf(a, a.length);
}
|
Copies the elements of double array .
|
36,957
|
/**
 * Returns a deep copy of the 2D double array, or null for null input.
 * Each row is copied so the result shares no arrays with the original.
 *
 * @param a the 2D array to copy (may be null)
 * @return a new deep-copied array, or null
 */
public static double[][] array_clone(double[][] a) {
    if (a == null) {
        return null;
    }
    double[][] result = new double[a.length][];
    for (int row = 0; row < a.length; row++) {
        result[row] = Arrays.copyOf(a[row], a[row].length);
    }
    return result;
}
|
Copies the elements of double 2D array .
|
36,958
|
/**
 * Combines the class-probability estimations of multiple classifiers by
 * summing, per class, the probabilities that each classifier assigned to it.
 *
 * @param classifierClassProbabilityMatrix per-classifier class probabilities
 * @return the summed probability mass per class
 */
public static AssociativeArray sum(DataTable2D classifierClassProbabilityMatrix) {
    AssociativeArray totals = new AssociativeArray();
    for (Map.Entry<Object, AssociativeArray> classifierEntry : classifierClassProbabilityMatrix.entrySet()) {
        for (Map.Entry<Object, Object> classEntry : classifierEntry.getValue().entrySet()) {
            Object clazz = classEntry.getKey();
            Double probability = TypeInference.toDouble(classEntry.getValue());
            // Accumulate into the running total, starting at 0.0 for unseen classes.
            Double runningTotal = totals.getDouble(clazz);
            if (runningTotal == null) {
                runningTotal = 0.0;
            }
            totals.put(clazz, runningTotal + probability);
        }
    }
    return totals;
}
|
Combines the responses of the classifiers by summing, per class, the probabilities of their responses.
|
36,959
|
/**
 * Combines the responses of multiple classifiers by estimating, for every
 * class, the median of the probabilities that the classifiers assigned to it.
 * The first pass collects all class labels that appear in any classifier's
 * output; the second pass gathers each class' probability list across the
 * classifiers (skipping classifiers that did not score the class) and stores
 * its median via Descriptives.median.
 */
public static AssociativeArray median ( DataTable2D classifierClassProbabilityMatrix ) { AssociativeArray combinedClassProbabilities = new AssociativeArray ( ) ; for ( Map . Entry < Object , AssociativeArray > entry : classifierClassProbabilityMatrix . entrySet ( ) ) { AssociativeArray listOfClassProbabilities = entry . getValue ( ) ; for ( Map . Entry < Object , Object > entry2 : listOfClassProbabilities . entrySet ( ) ) { Object theClass = entry2 . getKey ( ) ; combinedClassProbabilities . put ( theClass , 0.0 ) ; } } for ( Map . Entry < Object , Object > entry : combinedClassProbabilities . entrySet ( ) ) { Object theClass = entry . getKey ( ) ; FlatDataCollection listOfProbabilities = new FlatDataCollection ( new ArrayList < > ( ) ) ; for ( Map . Entry < Object , AssociativeArray > entry2 : classifierClassProbabilityMatrix . entrySet ( ) ) { AssociativeArray listOfClassProbabilities = entry2 . getValue ( ) ; Double probability = listOfClassProbabilities . getDouble ( theClass ) ; if ( probability != null ) { listOfProbabilities . add ( probability ) ; } } combinedClassProbabilities . put ( theClass , Descriptives . median ( listOfProbabilities ) ) ; } return combinedClassProbabilities ; }
|
Combines the responses of the classifiers by estimating, per class, the median of the probabilities of their responses.
|
36,960
|
/**
 * Combines the responses of multiple classifiers by majority vote: every class
 * label is first initialized to 0, then each classifier casts one vote for its
 * highest-probability class (selected via MapMethods.selectMaxKeyValue) and
 * the votes per class are summed.
 */
public static AssociativeArray majorityVote ( DataTable2D classifierClassProbabilityMatrix ) { AssociativeArray combinedClassProbabilities = new AssociativeArray ( ) ; for ( Map . Entry < Object , AssociativeArray > entry : classifierClassProbabilityMatrix . entrySet ( ) ) { AssociativeArray listOfClassProbabilities = entry . getValue ( ) ; for ( Map . Entry < Object , Object > entry2 : listOfClassProbabilities . entrySet ( ) ) { Object theClass = entry2 . getKey ( ) ; combinedClassProbabilities . put ( theClass , 0.0 ) ; } } for ( Map . Entry < Object , AssociativeArray > entry : classifierClassProbabilityMatrix . entrySet ( ) ) { AssociativeArray listOfClassProbabilities = entry . getValue ( ) ; Map . Entry < Object , Object > selectedClassEntry = MapMethods . selectMaxKeyValue ( listOfClassProbabilities ) ; Object theClass = selectedClassEntry . getKey ( ) ; Double previousValue = combinedClassProbabilities . getDouble ( theClass ) ; if ( previousValue == null ) { previousValue = 0.0 ; } combinedClassProbabilities . put ( theClass , previousValue + 1.0 ) ; } return combinedClassProbabilities ; }
|
Combines the responses of the classifiers by summing the votes of each winner class .
|
36,961
|
/**
 * Returns, for every topic, the probability distribution of the words sorted
 * in descending probability. The per-word probability uses the smoothed
 * estimate (n_jw + beta) / (n_j + beta * d), where n_jw is the count of word w
 * in topic j, n_j the total count of topic j, beta the smoothing parameter and
 * d the number of dimensions from the model parameters.
 */
public AssociativeArray2D getWordProbabilitiesPerTopic ( ) { AssociativeArray2D ptw = new AssociativeArray2D ( ) ; ModelParameters modelParameters = knowledgeBase . getModelParameters ( ) ; TrainingParameters trainingParameters = knowledgeBase . getTrainingParameters ( ) ; int k = trainingParameters . getK ( ) ; for ( int topicId = 0 ; topicId < k ; ++ topicId ) { ptw . put ( topicId , new AssociativeArray ( ) ) ; } int d = modelParameters . getD ( ) ; double beta = trainingParameters . getBeta ( ) ; Map < List < Object > , Integer > topicWordCounts = modelParameters . getTopicWordCounts ( ) ; Map < Integer , Integer > topicCounts = modelParameters . getTopicCounts ( ) ; for ( Map . Entry < List < Object > , Integer > entry : topicWordCounts . entrySet ( ) ) { List < Object > tpk = entry . getKey ( ) ; Integer topicId = ( Integer ) tpk . get ( 0 ) ; Object word = tpk . get ( 1 ) ; Integer njw = entry . getValue ( ) ; Integer nj = topicCounts . get ( topicId ) ; double probability = ( njw + beta ) / ( nj + beta * d ) ; ptw . get ( topicId ) . put ( word , probability ) ; } for ( int topicId = 0 ; topicId < k ; ++ topicId ) { ptw . put ( topicId , MapMethods . sortAssociativeArrayByValueDescending ( ptw . get ( topicId ) ) ) ; } return ptw ; }
|
Returns the distribution of the words in each topic .
|
36,962
|
/**
 * Increments the integer counter stored under the key by 1, treating a missing
 * key as 0.
 *
 * @param map the counter map to update
 * @param key the key whose counter is increased
 */
private <K> void increase(Map<K, Integer> map, K key) {
    Integer current = map.getOrDefault(key, 0);
    map.put(key, current + 1);
}
|
Utility method that increases the map value by 1 .
|
36,963
|
/**
 * Decrements the integer counter stored under the key by 1, treating a missing
 * key as 0.
 *
 * @param map the counter map to update
 * @param key the key whose counter is decreased
 */
private <K> void decrease(Map<K, Integer> map, K key) {
    Integer current = map.getOrDefault(key, 0);
    map.put(key, current - 1);
}
|
Utility method that decreases the map value by 1 .
|
36,964
|
/**
 * Called before serialization: scans every @BigMap-annotated field of the
 * object, and for each value that is NOT a serializable BigMap it stores the
 * reference in the returned map (keyed by field name) and nulls the field on
 * the object, so that serialization does not attempt to write it.
 * The extracted references are restored afterwards by postSerializer.
 *
 * @return map of field name to the temporarily removed field value
 * @throws RuntimeException wrapping any reflection access failure
 */
protected < T extends Serializable > Map < String , Object > preSerializer ( T serializableObject ) { Map < String , Object > objReferences = new HashMap < > ( ) ; for ( Field field : ReflectionMethods . getAllFields ( new LinkedList < > ( ) , serializableObject . getClass ( ) ) ) { if ( field . isAnnotationPresent ( BigMap . class ) ) { field . setAccessible ( true ) ; try { Object value = field . get ( serializableObject ) ; if ( ! isSerializableBigMap ( value ) ) { objReferences . put ( field . getName ( ) , value ) ; field . set ( serializableObject , null ) ; } } catch ( IllegalArgumentException | IllegalAccessException ex ) { throw new RuntimeException ( ex ) ; } } } return objReferences ; }
|
This method is called before serializing the objects . It extracts all the not - serializable BigMap references of the provided object and stores them in a Map . Then it replaces all the references of the provided object with nulls to avoid their serialization . The main idea is that we temporarily remove from the object any reference that will cause problems during the serialization phase .
|
36,965
|
/**
 * Called after serialization: moves every reference that preSerializer removed
 * (stored in objReferences keyed by field name) back onto the object, restoring
 * it to its pre-serialization state. Entries are removed from objReferences as
 * they are restored.
 *
 * @throws RuntimeException wrapping any reflection access failure
 */
protected < T extends Serializable > void postSerializer ( T serializableObject , Map < String , Object > objReferences ) { for ( Field field : ReflectionMethods . getAllFields ( new LinkedList < > ( ) , serializableObject . getClass ( ) ) ) { String fieldName = field . getName ( ) ; Object ref = objReferences . remove ( fieldName ) ; if ( ref != null ) { field . setAccessible ( true ) ; try { field . set ( serializableObject , ref ) ; } catch ( IllegalAccessException ex ) { throw new RuntimeException ( ex ) ; } } } }
|
This method is called after the object serialization . It moves all the not - serializable BigMap references from the Map back to the provided object . The main idea is that once the serialization is completed we are allowed to restore back all the references which were removed by the preSerializer .
|
36,966
|
/**
 * Called after deserialization: every @BigMap-annotated field that came back
 * null (because it was excluded from serialization) is re-initialized by
 * reflectively invoking the object's "initializeBigMapField" method.
 * The Method lookup is cached across fields since the signature is identical.
 *
 * @throws RuntimeException wrapping any reflection access failure
 */
protected < T extends Serializable > void postDeserializer ( T serializableObject ) { Method method = null ; for ( Field field : ReflectionMethods . getAllFields ( new LinkedList < > ( ) , serializableObject . getClass ( ) ) ) { if ( field . isAnnotationPresent ( BigMap . class ) ) { field . setAccessible ( true ) ; try { if ( field . get ( serializableObject ) == null ) { if ( method == null ) { method = ReflectionMethods . findMethod ( serializableObject , "initializeBigMapField" , this , field ) ; } ReflectionMethods . invokeMethod ( serializableObject , method , this , field ) ; } } catch ( IllegalAccessException ex ) { throw new RuntimeException ( ex ) ; } } } }
|
This method is called after the object deserialization . It initializes all BigMaps of the serializable object which have a null value . The main idea is that once an object is deserialized it will contain nulls in all the BigMap fields which were not serialized . For all of those fields we call their initialization methods .
|
36,967
|
/**
 * Returns whether the provided enum value is the currently active switch of
 * its enum class, as registered in ACTIVE_SWITCHES.
 *
 * @param obj the enum value to check
 * @return true if obj is the active switch for its class
 */
public static boolean isActive(Enum obj) {
    // A null registry entry means nothing is active for this class.
    Enum active = ACTIVE_SWITCHES.get((Class) obj.getClass());
    return obj == active;
}
|
Validates whether the feature is active .
|
36,968
|
/**
 * Returns whether the DataTable2D is rectangular: every row has exactly the
 * same set of columns as the first row encountered.
 *
 * @return true when all rows share one column set, false otherwise
 */
public boolean isValid() {
    Set<Object> referenceColumns = new HashSet<>();
    int expectedColumnCount = 0;
    for (Map.Entry<Object, AssociativeArray> rowEntry : internalData.entrySet()) {
        AssociativeArray row = rowEntry.getValue();
        if (referenceColumns.isEmpty()) {
            // The first row defines the reference column set.
            referenceColumns.addAll(row.internalData.keySet());
            expectedColumnCount = referenceColumns.size();
        } else {
            if (row.size() != expectedColumnCount) {
                return false;
            }
            for (Object column : referenceColumns) {
                if (!row.containsKey(column)) {
                    return false;
                }
            }
        }
    }
    return true;
}
|
Returns whether the DataTable2D is valid. This data structure is considered valid if all the DataTable cells are set and, as a result, the DataTable has a rectangular format.
|
36,969
|
/**
 * Returns the predicted class: the key with the maximum score in the
 * prediction scores.
 *
 * @param predictionScores per-class prediction scores
 * @return the class with the highest score
 */
protected Object getSelectedClassFromClassScores(AssociativeArray predictionScores) {
    return MapMethods.selectMaxKeyValue(predictionScores).getKey();
}
|
Estimates the selected class from the prediction scores .
|
36,970
|
/**
 * Samples n ids using Systematic Sampling: after an optional shuffle of the
 * population, picks a random starting offset in [0, k) and then takes every
 * k-th id, where k = populationN / n (integer division).
 *
 * NOTE(review): despite the method name, this implements systematic (not
 * simple random) sampling, consistent with the surrounding documentation.
 *
 * @throws IllegalArgumentException when populationN / n is less than 2
 */
public static FlatDataCollection randomSampling ( FlatDataList idList , int n , boolean randomizeRecords ) { FlatDataList sampledIds = new FlatDataList ( ) ; int populationN = idList . size ( ) ; Object [ ] keys = idList . toArray ( ) ; if ( randomizeRecords ) { PHPMethods . < Object > shuffle ( keys ) ; } int k = populationN / n ; if ( k < 2 ) { throw new IllegalArgumentException ( "The number of systematics is too small." ) ; } int randomSystematic = PHPMethods . mt_rand ( 0 , k - 1 ) ; for ( int i = randomSystematic ; i < keys . length ; i += k ) { Object pointID = keys [ i ] ; sampledIds . add ( pointID ) ; } return sampledIds . toFlatDataCollection ( ) ; }
|
Samples n ids by using Systematic Sampling
|
36,971
|
/**
 * Fetches a cluster from the clusterMap, restoring its featureIds first when
 * they are unset. The featureIds field is transient, so clusters loaded from
 * file-backed storage lose it; this accessor re-attaches the shared featureIds
 * from the model parameters. Always use this method instead of a raw
 * clusterMap.get().
 */
private CL getFromClusterMap ( int clusterId , Map < Integer , CL > clusterMap ) { CL c = clusterMap . get ( clusterId ) ; if ( c . getFeatureIds ( ) == null ) { c . setFeatureIds ( knowledgeBase . getModelParameters ( ) . getFeatureIds ( ) ) ; } return c ; }
|
Always use this method to get the cluster from the clusterMap because it ensures that the featureIds are set . The featureIds can be unset if we use a data structure which stores stuff in file . Since the featureIds field of cluster is transient the information gets lost . This function ensures that it sets it back .
|
36,972
|
/**
 * Returns the storage directory from the configuration, falling back to the
 * system temporary directory when none is configured.
 *
 * @return the directory path to use for storage
 */
protected String getDirectory() {
    String dir = storageConfiguration.getDirectory();
    if (dir != null && !dir.isEmpty()) {
        return dir;
    }
    // No directory configured: use the JVM's temporary directory.
    return System.getProperty("java.io.tmpdir");
}
|
Returns the location of the directory from the configuration or the temporary directory if not defined .
|
36,973
|
/**
 * Returns the root path of the storage with the provided name, located under
 * the configured storage directory.
 *
 * @param storageName the name of the storage
 * @return the root path of the storage
 */
protected Path getRootPath(String storageName) {
    // Paths.get joins the segments with the platform separator itself,
    // avoiding manual File.separator string concatenation.
    return Paths.get(getDirectory(), storageName);
}
|
Returns the root path of the storage .
|
36,974
|
/**
 * Deletes the file or directory if it exists. A plain Files.deleteIfExists is
 * tried first; when it fails because the directory is not empty, the tree is
 * walked bottom-up deleting every file and then every directory.
 *
 * @return true if the path existed and was deleted, false otherwise
 * @throws IOException if an I/O error occurs during deletion
 */
protected boolean deleteIfExistsRecursively ( Path path ) throws IOException { try { return Files . deleteIfExists ( path ) ; } catch ( DirectoryNotEmptyException ex ) { Files . walkFileTree ( path , new SimpleFileVisitor < Path > ( ) { public FileVisitResult visitFile ( Path file , BasicFileAttributes attrs ) throws IOException { Files . delete ( file ) ; return FileVisitResult . CONTINUE ; } public FileVisitResult postVisitDirectory ( Path dir , IOException exc ) throws IOException { Files . delete ( dir ) ; return FileVisitResult . CONTINUE ; } } ) ; return true ; } }
|
Deletes the file or directory recursively if it exists .
|
36,975
|
/**
 * Deletes the directory recursively if it exists and optionally removes any
 * parent directories that become empty as a result.
 *
 * @param path the directory to delete
 * @param cleanParent whether empty parent directories should also be removed
 * @return true if the path existed and was deleted, false otherwise
 * @throws IOException if an I/O error occurs during deletion
 */
protected boolean deleteDirectory(Path path, boolean cleanParent) throws IOException {
    boolean pathExists = deleteIfExistsRecursively(path);
    if (pathExists && cleanParent) {
        cleanEmptyParentDirectory(path.getParent());
    }
    // Bug fix: the previous version returned false even when the directory was
    // deleted, whenever cleanParent was false. The result must reflect deletion.
    return pathExists;
}
|
Deletes a directory and optionally removes the parent directory if it becomes empty .
|
36,976
|
/**
 * Recursively removes empty parent directories, stopping at (and never
 * deleting) the configured storage directory or the system temp directory.
 * The DirectoryNotEmptyException is deliberately swallowed: reaching a
 * non-empty parent simply terminates the cleanup.
 *
 * @throws IOException if an unexpected I/O error occurs during deletion
 */
private void cleanEmptyParentDirectory ( Path path ) throws IOException { Path normPath = path . normalize ( ) ; if ( normPath . equals ( Paths . get ( getDirectory ( ) ) . normalize ( ) ) || normPath . equals ( Paths . get ( System . getProperty ( "java.io.tmpdir" ) ) . normalize ( ) ) ) { return ; } try { Files . deleteIfExists ( path ) ; cleanEmptyParentDirectory ( path . getParent ( ) ) ; } catch ( DirectoryNotEmptyException ex ) { } }
|
Removes recursively all empty parent directories up to and excluding the storage directory .
|
36,977
|
/**
 * Moves the src directory to the target location: creates the target's parent
 * if missing, replaces any existing target directory, performs the move and
 * finally cleans up empty parent directories left behind at the source.
 *
 * @return true if src existed and was moved, false otherwise
 * @throws IOException if an I/O error occurs during the move
 */
protected boolean moveDirectory ( Path src , Path target ) throws IOException { if ( Files . exists ( src ) ) { createDirectoryIfNotExists ( target . getParent ( ) ) ; deleteDirectory ( target , false ) ; Files . move ( src , target ) ; cleanEmptyParentDirectory ( src . getParent ( ) ) ; return true ; } else { return false ; } }
|
Moves a directory in the target location .
|
36,978
|
/**
 * Creates the directory (including missing parents) at the target location if
 * it does not already exist.
 *
 * @param path the directory to create
 * @return true if the directory was created, false if it already existed
 * @throws IOException if an I/O error occurs during creation
 */
protected boolean createDirectoryIfNotExists(Path path) throws IOException {
    if (Files.exists(path)) {
        return false;
    }
    // createDirectories also creates any missing parent directories.
    Files.createDirectories(path);
    return true;
}
|
Creates the directory in the target location if it does not exist .
|
36,979
|
/**
 * Simple Moving Average over the last N values of the list (or over all values
 * when the list is shorter than N), iterating from the most recent backwards.
 *
 * NOTE(review): an empty list leaves counter at 0 and yields 0.0/0 = NaN —
 * confirm that callers guard against empty input.
 */
public static double simpleMovingAverage ( FlatDataList flatDataList , int N ) { double SMA = 0 ; int counter = 0 ; for ( int i = flatDataList . size ( ) - 1 ; i >= 0 ; -- i ) { double Yti = flatDataList . getDouble ( i ) ; if ( counter >= N ) { break ; } SMA += Yti ; ++ counter ; } SMA /= counter ; return SMA ; }
|
Simple Moving Average
|
36,980
|
/**
 * Weighted Moving Average over the last N values of the list: the most recent
 * value gets weight N, the next N-1, and so on; the result is the weighted sum
 * divided by the sum of the weights.
 *
 * @param flatDataList the time series, oldest first
 * @param N the window size
 * @return the weighted moving average of the last N observations
 */
public static double weightedMovingAverage(FlatDataList flatDataList, int N) {
    double weightedSum = 0.0;
    double weightTotal = 0.0;
    int taken = 0;
    // Walk backwards so the most recent observation receives the largest weight.
    for (int i = flatDataList.size() - 1; i >= 0 && taken < N; --i) {
        double weight = N - taken;
        weightedSum += weight * flatDataList.getDouble(i);
        weightTotal += weight;
        ++taken;
    }
    return weightedSum / weightTotal;
}
|
Weighted Moving Average
|
36,981
|
/**
 * Simple Exponential Smoothing: each observation is weighted by a*(1-a)^age,
 * where age is 0 for the most recent value and grows towards the oldest.
 *
 * @param flatDataList the time series, oldest first
 * @param a the smoothing factor in (0, 1)
 * @return the exponentially smoothed estimate
 */
public static double simpleExponentialSmoothing(FlatDataList flatDataList, double a) {
    double ema = 0.0;
    int age = 0; // 0 = most recent observation
    for (int i = flatDataList.size() - 1; i >= 0; --i) {
        ema += a * Math.pow(1 - a, age) * flatDataList.getDouble(i);
        ++age;
    }
    return ema;
}
|
Simple Exponential Smoothing
|
36,982
|
/**
 * Selects the kth largest element from the iterator by negating every value
 * and delegating to the kth-smallest selection.
 *
 * @param elements iterator over the values
 * @param k the rank of the element to select (1-based)
 * @return the kth largest value
 */
public static Double largest(Iterator<Double> elements, int k) {
    // Negating the stream turns the kth largest into the kth smallest.
    Iterator<Double> negated = new Iterator<Double>() {
        @Override
        public boolean hasNext() {
            return elements.hasNext();
        }

        @Override
        public Double next() {
            return -elements.next();
        }
    };
    return -smallest(negated, k);
}
|
Selects the kth largest element from an iterable object .
|
36,983
|
/**
 * Returns the mean cluster size: the sum over all clusters of size/M, where M
 * is the number of clusters.
 *
 * @param clusterIdList map of cluster to its list of ids
 * @return the mean cluster size
 */
public static double nBar(TransposeDataList clusterIdList) {
    int clusterCount = clusterIdList.size();
    double mean = 0.0;
    for (Map.Entry<Object, FlatDataList> entry : clusterIdList.entrySet()) {
        mean += entry.getValue().size() / (double) clusterCount;
    }
    return mean;
}
|
Returns the mean cluster size .
|
36,984
|
/**
 * Samples sampleM clusters using Cluster Sampling: shuffles the cluster keys
 * and keeps the first sampleM clusters together with all of their ids.
 *
 * NOTE(review): assumes sampleM <= number of clusters; larger values cause an
 * ArrayIndexOutOfBoundsException — confirm callers validate this.
 */
public static TransposeDataCollection randomSampling ( TransposeDataList clusterIdList , int sampleM ) { TransposeDataCollection sampledIds = new TransposeDataCollection ( ) ; Object [ ] selectedClusters = clusterIdList . keySet ( ) . toArray ( ) ; PHPMethods . < Object > shuffle ( selectedClusters ) ; for ( int i = 0 ; i < sampleM ; ++ i ) { Object cluster = selectedClusters [ i ] ; sampledIds . put ( cluster , clusterIdList . get ( cluster ) . toFlatDataCollection ( ) ) ; } return sampledIds ; }
|
Samples m clusters by using Cluster Sampling
|
36,985
|
/**
 * Replaces every smiley found in the text with its token from SMILEYS_MAPPING.
 *
 * NOTE(review): the map keys are passed to replaceAll as regular expressions —
 * they are presumably pre-escaped patterns; verify SMILEYS_MAPPING before
 * adding plain-text keys containing regex metacharacters.
 */
public static String tokenizeSmileys ( String text ) { for ( Map . Entry < String , String > smiley : SMILEYS_MAPPING . entrySet ( ) ) { text = text . replaceAll ( smiley . getKey ( ) , smiley . getValue ( ) ) ; } return text ; }
|
Replaces all the SMILEYS_MAPPING within the text with their tokens .
|
36,986
|
/**
 * Unifies punctuation so the resulting text contains only words separated by
 * single spaces and dots: secondary punctuation becomes a space, every
 * sentence terminator becomes a dot, repeated dots collapse, and each dot is
 * followed by exactly one space.
 *
 * @param text the text to normalize
 * @return the normalized, trimmed text
 */
public static String unifyTerminators(String text) {
    // Replace secondary punctuation with spaces.
    String result = text.replaceAll("[\",:;()\\-]+", " ");
    // Normalize every sentence terminator to a dot.
    result = result.replaceAll("[\\.!?]", ".");
    // Collapse runs of dots and spaces that follow a dot into a single dot.
    result = result.replaceAll("\\.[\\. ]+", ".");
    // Ensure exactly one space after each dot and none before it.
    result = result.replaceAll("\\s*\\.\\s*", ". ");
    return result.trim();
}
|
Replaces all terminators with space or dots . The final string will contain only alphanumerics and dots .
|
36,987
|
/**
 * Removes all accents from the text by decomposing characters into base
 * letters plus combining diacritical marks and stripping the marks.
 *
 * @param text the text to de-accent
 * @return the text with accents removed
 */
public static String removeAccents(String text) {
    String decomposed = Normalizer.normalize(text, Normalizer.Form.NFD);
    return decomposed.replaceAll("[\\p{InCombiningDiacriticalMarks}]", "");
}
|
Removes all accents from the text.
|
36,988
|
/**
 * Convenience cleaning pipeline: tokenizes URLs and smileys, strips accents
 * and symbols, collapses extra spaces and finally lower-cases the text using
 * the English locale.
 */
public static String clear ( String text ) { text = StringCleaner . tokenizeURLs ( text ) ; text = StringCleaner . tokenizeSmileys ( text ) ; text = StringCleaner . removeAccents ( text ) ; text = StringCleaner . removeSymbols ( text ) ; text = StringCleaner . removeExtraSpaces ( text ) ; return text . toLowerCase ( Locale . ENGLISH ) ; }
|
Convenience method which tokenizes the URLs and the SMILEYS_MAPPING removes accents and symbols and eliminates the extra spaces from the provided text .
|
36,989
|
/**
 * Convenience accessor: runs the Chisquare test on the table and returns only
 * the "score" entry of the result.
 *
 * @param dataTable the contingency table
 * @return the Chisquare score
 */
public static double getScoreValue(DataTable2D dataTable) {
    return getScore(dataTable).getDouble("score");
}
|
Convenience method to get the score of Chisquare .
|
36,990
|
/**
 * Solves a Linear Programming problem using Apache Commons Math's
 * SimplexSolver. Each LPConstraint sign (GEQ/LEQ/EQ) is translated into the
 * corresponding Relationship, the solver is run with Bland's pivot rule (to
 * avoid cycling), and the objective value plus variable assignments are
 * packaged into an LPResult.
 *
 * @param linearObjectiveFunction coefficients of the objective function
 * @param linearConstraintsList the constraints of the problem
 * @param nonNegative whether variables are restricted to non-negative values
 * @param maximize true to maximize the objective, false to minimize
 * @return the LP solution (objective value and variable values)
 */
public static LPResult solve ( double [ ] linearObjectiveFunction , List < LPSolver . LPConstraint > linearConstraintsList , boolean nonNegative , boolean maximize ) { int m = linearConstraintsList . size ( ) ; List < LinearConstraint > constraints = new ArrayList < > ( m ) ; for ( LPSolver . LPConstraint constraint : linearConstraintsList ) { String sign = constraint . getSign ( ) ; Relationship relationship = null ; if ( LPSolver . GEQ . equals ( sign ) ) { relationship = Relationship . GEQ ; } else if ( LPSolver . LEQ . equals ( sign ) ) { relationship = Relationship . LEQ ; } else if ( LPSolver . EQ . equals ( sign ) ) { relationship = Relationship . EQ ; } constraints . add ( new LinearConstraint ( constraint . getContraintBody ( ) , relationship , constraint . getValue ( ) ) ) ; } SimplexSolver solver = new SimplexSolver ( ) ; PointValuePair solution = solver . optimize ( new LinearObjectiveFunction ( linearObjectiveFunction , 0.0 ) , new LinearConstraintSet ( constraints ) , maximize ? GoalType . MAXIMIZE : GoalType . MINIMIZE , new NonNegativeConstraint ( nonNegative ) , PivotSelectionRule . BLAND ) ; LPResult result = new LPResult ( ) ; result . setObjectiveValue ( solution . getValue ( ) ) ; result . setVariableValues ( solution . getPoint ( ) ) ; return result ; }
|
Solves the LP problem and returns the result .
|
36,991
|
/**
 * Convenience accessor: returns the value stored under (key1, key2), or null
 * when the first-level key is absent.
 *
 * @param key1 the first-level key
 * @param key2 the second-level key
 * @return the stored value, or null if key1 has no entry
 */
public final Object get2d(Object key1, Object key2) {
    AssociativeArray inner = internalData.get(key1);
    return (inner == null) ? null : inner.internalData.get(key2);
}
|
Convenience function to get the value by using both keys .
|
36,992
|
/**
 * Convenience mutator: stores a value under the (key1, key2) cell, lazily
 * creating the inner AssociativeArray for key1 when it does not exist yet.
 *
 * @param key1 the first-level key
 * @param key2 the second-level key
 * @param value the value to store
 * @return the previous value at (key1, key2), or null if none
 */
public final Object put2d(Object key1, Object key2, Object value) {
    AssociativeArray inner = internalData.get(key1);
    if (inner == null) {
        inner = new AssociativeArray();
        // Keep the reference locally: avoids the redundant second get() the
        // previous version performed after inserting the new inner array.
        internalData.put(key1, inner);
    }
    return inner.internalData.put(key2, value);
}
|
Convenience function used to put a value in a particular key positions .
|
36,993
|
/**
 * Builds a URL string from its parts (protocol, authority, path, query, ref)
 * using the multi-argument URI constructor, which percent-encodes illegal
 * characters in each component.
 *
 * @throws RuntimeException wrapping any URISyntaxException
 */
public static String joinURL ( Map < URLParts , String > urlParts ) { try { URI uri = new URI ( urlParts . get ( URLParts . PROTOCOL ) , urlParts . get ( URLParts . AUTHORITY ) , urlParts . get ( URLParts . PATH ) , urlParts . get ( URLParts . QUERY ) , urlParts . get ( URLParts . REF ) ) ; return uri . toString ( ) ; } catch ( URISyntaxException ex ) { throw new RuntimeException ( ex ) ; } }
|
This method can be used to build a URL from its parts .
|
36,994
|
/**
 * Splits a lower-cased domain name into SUBDOMAIN, DOMAINNAME and TLD parts.
 * Two-part domains (example.com) are handled directly with a null subdomain.
 * For longer names the COMMON_TLD/COMMON_SLD lists are consulted to detect
 * compound TLDs (e.g. "co.uk") before falling back to a single-label TLD;
 * remaining leading labels are joined into the subdomain.
 * Returns null when the domain cannot be decomposed.
 */
public static Map < DomainParts , String > splitDomain ( String domain ) { Map < DomainParts , String > domainParts = null ; String [ ] dottedParts = domain . trim ( ) . toLowerCase ( Locale . ENGLISH ) . split ( "\\." ) ; if ( dottedParts . length == 2 ) { domainParts = new HashMap < > ( ) ; domainParts . put ( DomainParts . SUBDOMAIN , null ) ; domainParts . put ( DomainParts . DOMAINNAME , dottedParts [ 0 ] ) ; domainParts . put ( DomainParts . TLD , dottedParts [ 1 ] ) ; } else if ( dottedParts . length > 2 ) { int n = dottedParts . length ; if ( COMMON_TLD . contains ( dottedParts [ n - 2 ] + "." + dottedParts [ n - 1 ] ) || ( COMMON_TLD . contains ( dottedParts [ n - 1 ] ) && COMMON_SLD . contains ( dottedParts [ n - 2 ] ) ) || ( COMMON_TLD . contains ( dottedParts [ n - 2 ] ) && COMMON_SLD . contains ( dottedParts [ n - 1 ] ) ) ) { domainParts = new HashMap < > ( ) ; domainParts . put ( DomainParts . TLD , dottedParts [ n - 2 ] + "." + dottedParts [ n - 1 ] ) ; domainParts . put ( DomainParts . DOMAINNAME , dottedParts [ n - 3 ] ) ; StringBuilder sb = new StringBuilder ( dottedParts [ 0 ] ) ; for ( int i = 1 ; i < n - 3 ; ++ i ) { sb . append ( "." ) . append ( dottedParts [ i ] ) ; } domainParts . put ( DomainParts . SUBDOMAIN , sb . toString ( ) ) ; } else if ( COMMON_TLD . contains ( dottedParts [ n - 1 ] ) ) { domainParts = new HashMap < > ( ) ; domainParts . put ( DomainParts . TLD , dottedParts [ n - 1 ] ) ; domainParts . put ( DomainParts . DOMAINNAME , dottedParts [ n - 2 ] ) ; StringBuilder sb = new StringBuilder ( dottedParts [ 0 ] ) ; for ( int i = 1 ; i < n - 2 ; ++ i ) { sb . append ( "." ) . append ( dottedParts [ i ] ) ; } domainParts . put ( DomainParts . SUBDOMAIN , sb . toString ( ) ) ; } } return domainParts ; }
|
Splits a domain name to parts and returns them in a map .
|
36,995
|
/**
 * Flesch-Kincaid Reading Ease of the text, rounded to one digit:
 * 206.835 - 1.015 * (words/sentence) - 84.6 * (syllables/word).
 */
public static double fleschKincaidReadingEase ( String strText ) { strText = cleanText ( strText ) ; return PHPMethods . round ( ( 206.835 - ( 1.015 * averageWordsPerSentence ( strText ) ) - ( 84.6 * averageSyllablesPerWord ( strText ) ) ) , 1 ) ; }
|
Returns the Flesch - Kincaid Reading Ease of text entered rounded to one digit .
|
36,996
|
/**
 * Flesch-Kincaid Grade Level of the text, rounded to one digit:
 * 0.39 * (words/sentence) + 11.8 * (syllables/word) - 15.59.
 */
public static double fleschKincaidGradeLevel ( String strText ) { strText = cleanText ( strText ) ; return PHPMethods . round ( ( ( 0.39 * averageWordsPerSentence ( strText ) ) + ( 11.8 * averageSyllablesPerWord ( strText ) ) - 15.59 ) , 1 ) ; }
|
Returns the Flesch - Kincaid Grade level of text entered rounded to one digit .
|
36,997
|
/**
 * Gunning-Fog score of the text, rounded to one digit:
 * 0.4 * (average words per sentence + percentage of three-syllable words).
 */
public static double gunningFogScore ( String strText ) { strText = cleanText ( strText ) ; return PHPMethods . round ( ( ( averageWordsPerSentence ( strText ) + percentageWordsWithThreeSyllables ( strText ) ) * 0.4 ) , 1 ) ; }
|
Returns the Gunning - Fog score of text entered rounded to one digit .
|
36,998
|
/**
 * Coleman-Liau Index of the text, rounded to one digit.
 *
 * NOTE(review): the canonical formula is 0.0588*L - 0.296*S - 15.8 with L and
 * S expressed per 100 words; the coefficients here (5.89 and 0.3 applied to
 * per-word ratios) follow the PHP text-statistics port rather than the
 * published constants — confirm which variant is intended before changing.
 */
public static double colemanLiauIndex ( String strText ) { strText = cleanText ( strText ) ; int intWordCount = wordCount ( strText ) ; return PHPMethods . round ( ( ( 5.89 * ( letterCount ( strText ) / ( double ) intWordCount ) ) - ( 0.3 * ( sentenceCount ( strText ) / ( double ) intWordCount ) ) - 15.8 ) , 1 ) ; }
|
Returns the Coleman - Liau Index of text entered rounded to one digit .
|
36,999
|
/**
 * SMOG Index of the text, rounded to one digit, following McLaughlin's 1969
 * grading formula: 1.043 * sqrt(polysyllables * 30 / sentences) + 3.1291.
 *
 * Bug fix: the constant 3.1291 was previously added INSIDE the square root;
 * the published formula adds it to the scaled square root.
 *
 * @param strText the text to score
 * @return the SMOG grade, rounded to one digit
 */
public static double smogIndex(String strText) {
    strText = cleanText(strText);
    return PHPMethods.round(
            1.043 * Math.sqrt(wordsWithThreeSyllables(strText) * (30.0 / sentenceCount(strText))) + 3.1291,
            1);
}
|
Returns the SMOG Index of text entered rounded to one digit .
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.