idx
int64 0
41.2k
| question
stringlengths 74
4.04k
| target
stringlengths 7
750
|
|---|---|---|
36,800
|
protected void keepTopFeatures ( Map < Object , Double > featureScores , int maxFeatures ) { logger . debug ( "keepTopFeatures()" ) ; logger . debug ( "Estimating the minPermittedScore" ) ; Double minPermittedScore = SelectKth . largest ( featureScores . values ( ) . iterator ( ) , maxFeatures ) ; logger . debug ( "Removing features with scores less than threshold" ) ; Iterator < Map . Entry < Object , Double > > it = featureScores . entrySet ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { Map . Entry < Object , Double > entry = it . next ( ) ; if ( entry . getValue ( ) < minPermittedScore ) { it . remove ( ) ; } } int numOfExtraFeatures = featureScores . size ( ) - maxFeatures ; if ( numOfExtraFeatures > 0 ) { logger . debug ( "Removing extra features caused by ties" ) ; it = featureScores . entrySet ( ) . iterator ( ) ; while ( it . hasNext ( ) && numOfExtraFeatures > 0 ) { Map . Entry < Object , Double > entry = it . next ( ) ; if ( entry . getValue ( ) - minPermittedScore <= 0.0 ) { it . remove ( ) ; -- numOfExtraFeatures ; } } } }
|
This method keeps the highest scoring features of the provided feature map and removes all the others .
|
36,801
|
protected void removeRareFeatures ( Map < Object , Double > featureCounts , int rareFeatureThreshold ) { logger . debug ( "removeRareFeatures()" ) ; Iterator < Map . Entry < Object , Double > > it = featureCounts . entrySet ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { Map . Entry < Object , Double > entry = it . next ( ) ; if ( entry . getValue ( ) < rareFeatureThreshold ) { it . remove ( ) ; } } }
|
Removes any feature with less occurrences than the threshold .
|
36,802
|
public static TransposeDataCollection weightedProbabilitySampling ( AssociativeArray2D strataFrequencyTable , AssociativeArray nh , boolean withReplacement ) { TransposeDataCollection sampledIds = new TransposeDataCollection ( ) ; for ( Map . Entry < Object , AssociativeArray > entry : strataFrequencyTable . entrySet ( ) ) { Object strata = entry . getKey ( ) ; Number sampleN = ( ( Number ) nh . get ( strata ) ) ; if ( sampleN == null ) { continue ; } sampledIds . put ( strata , SimpleRandomSampling . weightedSampling ( entry . getValue ( ) , sampleN . intValue ( ) , withReplacement ) ) ; } return sampledIds ; }
|
Samples nh ids from each strata based on their Frequency Table
|
36,803
|
public static TransposeDataCollection randomSampling ( TransposeDataList strataIdList , AssociativeArray nh , boolean withReplacement ) { TransposeDataCollection sampledIds = new TransposeDataCollection ( ) ; for ( Map . Entry < Object , FlatDataList > entry : strataIdList . entrySet ( ) ) { Object strata = entry . getKey ( ) ; Number sampleN = ( ( Number ) nh . get ( strata ) ) ; if ( sampleN == null ) { continue ; } sampledIds . put ( strata , SimpleRandomSampling . randomSampling ( entry . getValue ( ) , sampleN . intValue ( ) , withReplacement ) ) ; } return sampledIds ; }
|
Samples nh ids from each strata by using Stratified Sampling
|
36,804
|
public static double variance ( TransposeDataCollection sampleDataCollection , AssociativeArray populationNh ) { double variance = 0.0 ; int populationN = 0 ; double mean = mean ( sampleDataCollection , populationNh ) ; for ( Map . Entry < Object , FlatDataCollection > entry : sampleDataCollection . entrySet ( ) ) { Object strata = entry . getKey ( ) ; Integer strataPopulation = ( ( Number ) populationNh . get ( strata ) ) . intValue ( ) ; if ( strataPopulation == null ) { throw new IllegalArgumentException ( "Invalid strata population size." ) ; } populationN += strataPopulation ; variance += ( strataPopulation - 1 ) * SimpleRandomSampling . variance ( entry . getValue ( ) ) ; variance += strataPopulation * Math . pow ( SimpleRandomSampling . mean ( entry . getValue ( ) ) - mean , 2 ) ; } variance /= ( populationN - 1 ) ; return variance ; }
|
Calculate the variance from the sample
|
36,805
|
public static double std ( TransposeDataCollection sampleDataCollection , AssociativeArray populationNh ) { return Math . sqrt ( variance ( sampleDataCollection , populationNh ) ) ; }
|
Calculate the standard deviation of the sample
|
36,806
|
public static AssociativeArray optimumSampleSize ( int n , AssociativeArray populationNh , AssociativeArray populationStdh ) { AssociativeArray nh = new AssociativeArray ( ) ; double sumNhSh = 0.0 ; for ( Map . Entry < Object , Object > entry : populationNh . entrySet ( ) ) { Object strata = entry . getKey ( ) ; Integer populationInStrata = ( ( Number ) entry . getValue ( ) ) . intValue ( ) ; Double populationStd = populationStdh . getDouble ( strata ) ; if ( populationStd == null || populationInStrata <= 0.0 ) { throw new IllegalArgumentException ( "Invalid strata population or strata std." ) ; } double NhSh = populationInStrata * populationStd ; sumNhSh += NhSh ; nh . put ( strata , n * NhSh ) ; } if ( sumNhSh <= 0 ) { throw new IllegalArgumentException ( "Invalid strata populations." ) ; } for ( Map . Entry < Object , Object > entry : nh . entrySet ( ) ) { Object strata = entry . getKey ( ) ; nh . put ( strata , TypeInference . toDouble ( entry . getValue ( ) ) / sumNhSh ) ; } return nh ; }
|
Returns the optimum sample size per strata under Neyman Allocation
|
36,807
|
public static < T > void throttledExecution ( Stream < T > stream , Consumer < T > consumer , ConcurrencyConfiguration concurrencyConfiguration ) { if ( concurrencyConfiguration . isParallelized ( ) ) { int maxThreads = concurrencyConfiguration . getMaxNumberOfThreadsPerTask ( ) ; int maxTasks = 2 * maxThreads ; ExecutorService executorService = Executors . newFixedThreadPool ( maxThreads ) ; ThrottledExecutor executor = new ThrottledExecutor ( executorService , maxTasks ) ; stream . sequential ( ) . forEach ( i -> { executor . execute ( ( ) -> { consumer . accept ( i ) ; } ) ; } ) ; executorService . shutdown ( ) ; try { executorService . awaitTermination ( Integer . MAX_VALUE , TimeUnit . SECONDS ) ; } catch ( InterruptedException ex ) { throw new RuntimeException ( ex ) ; } } else { Runnable runnable = ( ) -> stream . forEach ( consumer ) ; runnable . run ( ) ; } }
|
Takes the items of the stream in a throttled way and provides them to the consumer . It uses as many threads as the available processors and it does not start more tasks than 2 times the previous number .
|
36,808
|
protected static double betinc ( double x , double A , double B ) { double A0 = 0.0 ; double B0 = 1.0 ; double A1 = 1.0 ; double B1 = 1.0 ; double M9 = 0.0 ; double A2 = 0.0 ; while ( Math . abs ( ( A1 - A2 ) / A1 ) > 0.00001 ) { A2 = A1 ; double C9 = - ( A + M9 ) * ( A + B + M9 ) * x / ( A + 2.0 * M9 ) / ( A + 2.0 * M9 + 1.0 ) ; A0 = A1 + C9 * A0 ; B0 = B1 + C9 * B0 ; M9 = M9 + 1 ; C9 = M9 * ( B - M9 ) * x / ( A + 2.0 * M9 - 1.0 ) / ( A + 2.0 * M9 ) ; A1 = A0 + C9 * A1 ; B1 = B0 + C9 * B1 ; A0 = A0 / B1 ; B0 = B0 / B1 ; A1 = A1 / B1 ; B1 = 1.0 ; } return A1 / A ; }
|
Internal function used by StudentCdf
|
36,809
|
public static double exponentialCdf ( double x , double lamda ) { if ( x < 0 || lamda <= 0 ) { throw new IllegalArgumentException ( "All the parameters must be positive." ) ; } double probability = 1.0 - Math . exp ( - lamda * x ) ; return probability ; }
|
Calculates the probability from 0 to X under Exponential Distribution
|
36,810
|
public static double betaCdf ( double x , double a , double b ) { if ( x < 0 || a <= 0 || b <= 0 ) { throw new IllegalArgumentException ( "All the parameters must be positive." ) ; } double Bcdf = 0.0 ; if ( x == 0 ) { return Bcdf ; } else if ( x >= 1 ) { Bcdf = 1.0 ; return Bcdf ; } double S = a + b ; double BT = Math . exp ( logGamma ( S ) - logGamma ( b ) - logGamma ( a ) + a * Math . log ( x ) + b * Math . log ( 1 - x ) ) ; if ( x < ( a + 1.0 ) / ( S + 2.0 ) ) { Bcdf = BT * betinc ( x , a , b ) ; } else { Bcdf = 1.0 - BT * betinc ( 1.0 - x , b , a ) ; } return Bcdf ; }
|
Calculates the probability from 0 to X under Beta Distribution
|
36,811
|
public static double fCdf ( double x , int f1 , int f2 ) { if ( x < 0 || f1 <= 0 || f2 <= 0 ) { throw new IllegalArgumentException ( "All the parameters must be positive." ) ; } double Z = x / ( x + ( double ) f2 / f1 ) ; double FCdf = betaCdf ( Z , f1 / 2.0 , f2 / 2.0 ) ; return FCdf ; }
|
Calculates the probability from 0 to X under F Distribution
|
36,812
|
public static double gammaCdf ( double x , double a , double b ) { if ( a <= 0 || b <= 0 ) { throw new IllegalArgumentException ( "All the parameters must be positive." ) ; } double GammaCdf = ContinuousDistributions . gammaCdf ( x / b , a ) ; return GammaCdf ; }
|
Calculates the probability from 0 to X under Gamma Distribution
|
36,813
|
public static double uniformCdf ( double x , double a , double b ) { if ( a >= b ) { throw new IllegalArgumentException ( "The a must be smaller than b." ) ; } double probabilitySum ; if ( x < a ) { probabilitySum = 0.0 ; } else if ( x < b ) { probabilitySum = ( x - a ) / ( b - a ) ; } else { probabilitySum = 1 ; } return probabilitySum ; }
|
Returns the cumulative probability of Uniform
|
36,814
|
public static double kolmogorov ( double z ) { if ( z < 0.27 ) { return 0.0 ; } else if ( z > 3.2 ) { return 1.1 ; } double ks = 0 ; double y = - 2 * z * z ; for ( int i = 27 ; i >= 1 ; i = i - 2 ) { ks = Math . exp ( i * y ) * ( 1 - ks ) ; } return 1.0 - 2.0 * ks ; }
|
Returns the cumulative probability of kolmogorov
|
36,815
|
public static double dirichletPdf ( double [ ] pi , double [ ] ai ) { double probability = 1.0 ; double sumAi = 0.0 ; double productGammaAi = 1.0 ; double tmp ; int piLength = pi . length ; for ( int i = 0 ; i < piLength ; ++ i ) { tmp = ai [ i ] ; sumAi += tmp ; productGammaAi *= gamma ( tmp ) ; probability *= Math . pow ( pi [ i ] , tmp - 1 ) ; } probability *= gamma ( sumAi ) / productGammaAi ; return probability ; }
|
Calculates probability pi ai under dirichlet distribution
|
36,816
|
public static double dirichletPdf ( double [ ] pi , double a ) { double probability = 1.0 ; int piLength = pi . length ; for ( int i = 0 ; i < piLength ; ++ i ) { probability *= Math . pow ( pi [ i ] , a - 1 ) ; } double sumAi = piLength * a ; double productGammaAi = Math . pow ( gamma ( a ) , piLength ) ; probability *= gamma ( sumAi ) / productGammaAi ; return probability ; }
|
Implementation for single alpha value .
|
36,817
|
public static double [ ] multinomialGaussianSample ( double [ ] mean , double [ ] [ ] covariance ) { MultivariateNormalDistribution gaussian = new MultivariateNormalDistribution ( mean , covariance ) ; gaussian . reseedRandomGenerator ( RandomGenerator . getThreadLocalRandom ( ) . nextLong ( ) ) ; return gaussian . sample ( ) ; }
|
Samples from Multinomial Normal Distribution .
|
36,818
|
public static double multinomialGaussianPdf ( double [ ] mean , double [ ] [ ] covariance , double [ ] x ) { MultivariateNormalDistribution gaussian = new MultivariateNormalDistribution ( mean , covariance ) ; return gaussian . density ( x ) ; }
|
Calculates the PDF of Multinomial Normal Distribution for a particular x .
|
36,819
|
public static Map . Entry < Object , Double > selectMaxKeyValue ( Map < Object , Double > keyValueMap ) { Double maxValue = Double . NEGATIVE_INFINITY ; Object maxValueKey = null ; for ( Map . Entry < Object , Double > entry : keyValueMap . entrySet ( ) ) { Double value = entry . getValue ( ) ; if ( value != null && value > maxValue ) { maxValue = value ; maxValueKey = entry . getKey ( ) ; } } return new AbstractMap . SimpleEntry < > ( maxValueKey , keyValueMap . get ( maxValueKey ) ) ; }
|
Selects the key - value entry with the largest value .
|
36,820
|
public static Map . Entry < Object , Object > selectMinKeyValue ( AssociativeArray keyValueMap ) { Double minValue = Double . POSITIVE_INFINITY ; Object minValueKey = null ; for ( Map . Entry < Object , Object > entry : keyValueMap . entrySet ( ) ) { Double value = TypeInference . toDouble ( entry . getValue ( ) ) ; if ( value != null && value < minValue ) { minValue = value ; minValueKey = entry . getKey ( ) ; } } return new AbstractMap . SimpleEntry < > ( minValueKey , keyValueMap . get ( minValueKey ) ) ; }
|
Selects the key - value entry with the smallest value .
|
36,821
|
public static < K , V > Map < K , V > sortNumberMapByKeyAscending ( Map < K , V > map ) { return sortNumberMapByKeyAscending ( map . entrySet ( ) ) ; }
|
Sorts by Key a Map in ascending order .
|
36,822
|
public static < K , V > Map < K , V > sortNumberMapByKeyDescending ( Map < K , V > map ) { return sortNumberMapByKeyDescending ( map . entrySet ( ) ) ; }
|
Sorts by Key a Map in descending order .
|
36,823
|
public static < K , V > Map < K , V > sortNumberMapByValueDescending ( Map < K , V > map ) { ArrayList < Map . Entry < K , V > > entries = new ArrayList < > ( map . entrySet ( ) ) ; Collections . sort ( entries , ( Map . Entry < K , V > a , Map . Entry < K , V > b ) -> { Double va = TypeInference . toDouble ( a . getValue ( ) ) ; Double vb = TypeInference . toDouble ( b . getValue ( ) ) ; return - va . compareTo ( vb ) ; } ) ; Map < K , V > sortedMap = new LinkedHashMap < > ( ) ; for ( Map . Entry < K , V > entry : entries ) { sortedMap . put ( entry . getKey ( ) , entry . getValue ( ) ) ; } return sortedMap ; }
|
Sorts by Value a Map in descending order .
|
36,824
|
public static AssociativeArray sortAssociativeArrayByValueAscending ( AssociativeArray associativeArray ) { ArrayList < Map . Entry < Object , Object > > entries = new ArrayList < > ( associativeArray . entrySet ( ) ) ; Collections . sort ( entries , ( Map . Entry < Object , Object > a , Map . Entry < Object , Object > b ) -> { Double va = TypeInference . toDouble ( a . getValue ( ) ) ; Double vb = TypeInference . toDouble ( b . getValue ( ) ) ; return va . compareTo ( vb ) ; } ) ; AssociativeArray sortedAssociativeArray = new AssociativeArray ( ) ; for ( Map . Entry < Object , Object > entry : entries ) { sortedAssociativeArray . put ( entry . getKey ( ) , entry . getValue ( ) ) ; } return sortedAssociativeArray ; }
|
Sorts by Value a Associative Array in ascending order .
|
36,825
|
private static String unescapeHtml ( final String input ) { StringBuilder writer = null ; int len = input . length ( ) ; int i = 1 ; int st = 0 ; while ( true ) { while ( i < len && input . charAt ( i - 1 ) != '&' ) { i ++ ; } if ( i >= len ) { break ; } int j = i ; while ( j < len && j < i + MAX_ESCAPE + 1 && input . charAt ( j ) != ';' ) { j ++ ; } if ( j == len || j < i + MIN_ESCAPE || j == i + MAX_ESCAPE + 1 ) { i ++ ; continue ; } if ( input . charAt ( i ) == '#' ) { int k = i + 1 ; int radix = 10 ; final char firstChar = input . charAt ( k ) ; if ( firstChar == 'x' || firstChar == 'X' ) { k ++ ; radix = 16 ; } try { int entityValue = Integer . parseInt ( input . substring ( k , j ) , radix ) ; if ( writer == null ) { writer = new StringBuilder ( input . length ( ) ) ; } writer . append ( input . substring ( st , i - 1 ) ) ; if ( entityValue > 0xFFFF ) { final char [ ] chrs = Character . toChars ( entityValue ) ; writer . append ( chrs [ 0 ] ) ; writer . append ( chrs [ 1 ] ) ; } else if ( entityValue == 39 ) { writer . append ( '\'' ) ; } else { writer . append ( entityValue ) ; } } catch ( NumberFormatException ex ) { i ++ ; continue ; } } else { CharSequence value = LOOKUP_MAP . get ( input . substring ( i , j ) ) ; if ( value == null ) { i ++ ; continue ; } if ( writer == null ) { writer = new StringBuilder ( input . length ( ) ) ; } writer . append ( input . substring ( st , i - 1 ) ) ; writer . append ( value ) ; } st = j + 1 ; i = st ; } if ( writer != null ) { writer . append ( input . substring ( st , len ) ) ; return writer . toString ( ) ; } return input ; }
|
Unescapes HTML3 chars from a string .
|
36,826
|
public static String replaceImgWithAlt ( String html ) { Matcher m = IMG_ALT_TITLE_PATTERN . matcher ( html ) ; if ( m . find ( ) ) { return m . replaceAll ( " $1 " ) ; } return html ; }
|
Replaces the img tags with their alt text .
|
36,827
|
public static String safeRemoveAllTags ( String html ) { html = removeNonTextTags ( html ) ; html = unsafeRemoveAllTags ( html ) ; return html ; }
|
A safe way to remove the tags from an HTML string . The method removes first javascript css and other non text blocks and then removes all HTML tags .
|
36,828
|
public static String extractText ( String html ) { html = replaceImgWithAlt ( html ) ; html = safeRemoveAllTags ( html ) ; html = unescapeHtml ( html ) ; return html ; }
|
Extracts the text from an HTML page .
|
36,829
|
public static String extractTitle ( String html ) { Matcher m = TITLE_PATTERN . matcher ( html ) ; if ( m . find ( ) ) { return clear ( m . group ( 0 ) ) ; } return null ; }
|
Extracts the title of the page .
|
36,830
|
public static Map < HyperlinkPart , List < String > > extractHyperlinks ( String html ) { Map < HyperlinkPart , List < String > > hyperlinksMap = new HashMap < > ( ) ; hyperlinksMap . put ( HyperlinkPart . HTMLTAG , new ArrayList < > ( ) ) ; hyperlinksMap . put ( HyperlinkPart . URL , new ArrayList < > ( ) ) ; hyperlinksMap . put ( HyperlinkPart . ANCHORTEXT , new ArrayList < > ( ) ) ; Matcher m = HYPERLINK_PATTERN . matcher ( html ) ; while ( m . find ( ) ) { if ( m . groupCount ( ) == 2 ) { String tag = m . group ( 0 ) ; String url = m . group ( 1 ) ; String anchortext = m . group ( 2 ) ; hyperlinksMap . get ( HyperlinkPart . HTMLTAG ) . add ( tag ) ; hyperlinksMap . get ( HyperlinkPart . URL ) . add ( url ) ; hyperlinksMap . get ( HyperlinkPart . ANCHORTEXT ) . add ( anchortext ) ; } } return hyperlinksMap ; }
|
Extracts the hyperlinks from an html string and returns their components in a map .
|
36,831
|
public static Map < String , String > extractMetatags ( String html ) { Map < String , String > metatagsMap = new HashMap < > ( ) ; Matcher m = METATAG_PATTERN . matcher ( html ) ; while ( m . find ( ) ) { if ( m . groupCount ( ) == 2 ) { String name = m . group ( 1 ) ; String content = m . group ( 2 ) ; metatagsMap . put ( clear ( name ) , clear ( content ) ) ; } } return metatagsMap ; }
|
Extracts the meta tags from an HTML page and returns them in a map .
|
36,832
|
public static double normalDistribution ( Double x , AssociativeArray params ) { double mean = params . getDouble ( "mean" ) ; double variance = params . getDouble ( "variance" ) ; double z = ( x - mean ) / Math . sqrt ( variance ) ; return ContinuousDistributions . gaussCdf ( z ) ; }
|
Cumulative Normal Distribution Method . This method is called via reflection .
|
36,833
|
public static double bernoulliCdf ( int k , double p ) { if ( p < 0 ) { throw new IllegalArgumentException ( "The probability p can't be negative." ) ; } double probabilitySum = 0.0 ; if ( k < 0 ) { } else if ( k < 1 ) { probabilitySum = ( 1 - p ) ; } else { probabilitySum = 1.0 ; } return probabilitySum ; }
|
Returns the cumulative probability under bernoulli
|
36,834
|
public static double binomial ( int k , double p , int n ) { if ( k < 0 || p < 0 || n < 1 ) { throw new IllegalArgumentException ( "All the parameters must be positive and n larger than 1." ) ; } k = Math . min ( k , n ) ; double probability = approxBinomialCdf ( k , p , n ) ; if ( k > 0 ) { probability -= approxBinomialCdf ( k - 1 , p , n ) ; } return probability ; }
|
Returns the probability of k of a specific number of tries n and probability p
|
36,835
|
public static double binomialCdf ( int k , double p , int n ) { if ( k < 0 || p < 0 || n < 1 ) { throw new IllegalArgumentException ( "All the parameters must be positive and n larger than 1." ) ; } k = Math . min ( k , n ) ; double probabilitySum = approxBinomialCdf ( k , p , n ) ; return probabilitySum ; }
|
Returns the cumulative probability of k of a specific number of tries n and probability p
|
36,836
|
private static double approxBinomialCdf ( int k , double p , int n ) { double Z = p ; double A = k + 1 ; double B = n - k ; double S = A + B ; double BT = Math . exp ( ContinuousDistributions . logGamma ( S ) - ContinuousDistributions . logGamma ( B ) - ContinuousDistributions . logGamma ( A ) + A * Math . log ( Z ) + B * Math . log ( 1 - Z ) ) ; double probabilitySum ; if ( Z < ( A + 1 ) / ( S + 2 ) ) { probabilitySum = BT * ContinuousDistributions . betinc ( Z , A , B ) ; } else { probabilitySum = 1.0 - BT * ContinuousDistributions . betinc ( 1.0 - Z , B , A ) ; } probabilitySum = 1.0 - probabilitySum ; return probabilitySum ; }
|
Returns the a good approximation of cumulative probability of k of a specific number of tries n and probability p
|
36,837
|
public static double geometric ( int k , double p ) { if ( k <= 0 || p < 0 ) { throw new IllegalArgumentException ( "All the parameters must be positive." ) ; } double probability = Math . pow ( 1 - p , k - 1 ) * p ; return probability ; }
|
Returns the probability that the first success requires k trials with probability of success p
|
36,838
|
public static double geometricCdf ( int k , double p ) { if ( k <= 0 || p < 0 ) { throw new IllegalArgumentException ( "All the parameters must be positive." ) ; } double probabilitySum = 0.0 ; for ( int i = 1 ; i <= k ; ++ i ) { probabilitySum += geometric ( i , p ) ; } return probabilitySum ; }
|
Returns the cumulative probability of geometric
|
36,839
|
public static double negativeBinomial ( int n , int r , double p ) { if ( n < 0 || r < 0 || p < 0 ) { throw new IllegalArgumentException ( "All the parameters must be positive." ) ; } n = Math . max ( n , r ) ; double probability = Arithmetics . combination ( n - 1 , r - 1 ) * Math . pow ( 1 - p , n - r ) * Math . pow ( p , r ) ; return probability ; }
|
Returns the probability of requiring n tries to achieve r successes with probability of success p
|
36,840
|
public static double negativeBinomialCdf ( int n , int r , double p ) { if ( n < 0 || r < 0 || p < 0 ) { throw new IllegalArgumentException ( "All the parameters must be positive." ) ; } n = Math . max ( n , r ) ; double probabilitySum = 0.0 ; for ( int i = 0 ; i <= r ; ++ i ) { probabilitySum += negativeBinomial ( n , i , p ) ; } return probabilitySum ; }
|
Returns the cumulative probability of negativeBinomial
|
36,841
|
public static double uniformCdf ( int k , int n ) { if ( k < 0 || n < 1 ) { throw new IllegalArgumentException ( "All the parameters must be positive and n larger than 1." ) ; } k = Math . min ( k , n ) ; double probabilitySum = k * uniform ( n ) ; return probabilitySum ; }
|
Returns the cumulative probability of uniform
|
36,842
|
public static double hypergeometric ( int k , int n , int Kp , int Np ) { if ( k < 0 || n < 0 || Kp < 0 || Np < 0 ) { throw new IllegalArgumentException ( "All the parameters must be positive." ) ; } Kp = Math . max ( k , Kp ) ; Np = Math . max ( n , Np ) ; double probability = approxHypergeometricCdf ( k , n , Kp , Np ) ; if ( k > 0 ) { probability -= approxHypergeometricCdf ( k - 1 , n , Kp , Np ) ; } return probability ; }
|
Returns the probability of finding k successes on a sample of n from a population with Kp successes and size Np
|
36,843
|
public static double hypergeometricCdf ( int k , int n , int Kp , int Np ) { if ( k < 0 || n < 0 || Kp < 0 || Np < 0 ) { throw new IllegalArgumentException ( "All the parameters must be positive." ) ; } Kp = Math . max ( k , Kp ) ; Np = Math . max ( n , Np ) ; double probabilitySum = approxHypergeometricCdf ( k , n , Kp , Np ) ; return probabilitySum ; }
|
Returns the cumulative probability of hypergeometric
|
36,844
|
public static double poisson ( int k , double lamda ) { if ( k < 0 || lamda < 0 ) { throw new IllegalArgumentException ( "All the parameters must be positive." ) ; } double probability = poissonCdf ( k , lamda ) ; if ( k > 0 ) { probability -= poissonCdf ( k - 1 , lamda ) ; } return probability ; }
|
Returns the probability of k occurrences when rate is lamda
|
36,845
|
public static double poissonCdf ( int k , double lamda ) { if ( k < 0 || lamda < 0 ) { throw new IllegalArgumentException ( "All the parameters must be positive." ) ; } double probabilitySum = 1.0 - ContinuousDistributions . gammaCdf ( lamda , k + 1 ) ; return probabilitySum ; }
|
Returns the cumulative probability of poisson
|
36,846
|
public static int count ( Iterable it ) { int n = 0 ; for ( Object v : it ) { if ( v != null ) { ++ n ; } } return n ; }
|
Returns the number of not - null items in the iteratable .
|
36,847
|
public static double sum ( FlatDataCollection flatDataCollection ) { double sum = 0.0 ; Iterator < Double > it = flatDataCollection . iteratorDouble ( ) ; while ( it . hasNext ( ) ) { Double value = it . next ( ) ; if ( value != null ) { sum += value ; } } return sum ; }
|
Returns the sum of a Collection
|
36,848
|
public static double mean ( FlatDataCollection flatDataCollection ) { int n = 0 ; double mean = 0.0 ; Iterator < Double > it = flatDataCollection . iteratorDouble ( ) ; while ( it . hasNext ( ) ) { Double value = it . next ( ) ; if ( value != null ) { ++ n ; mean += value ; } } if ( n == 0 ) { throw new IllegalArgumentException ( "No not null values where found in the collection." ) ; } mean /= n ; return mean ; }
|
Calculates the simple mean
|
36,849
|
public static double meanSE ( FlatDataCollection flatDataCollection ) { double std = std ( flatDataCollection , true ) ; double meanSE = std / Math . sqrt ( count ( flatDataCollection ) ) ; return meanSE ; }
|
Calculates Standard Error of Mean under SRS
|
36,850
|
public static double median ( FlatDataCollection flatDataCollection ) { double [ ] doubleArray = flatDataCollection . stream ( ) . filter ( x -> x != null ) . mapToDouble ( TypeInference :: toDouble ) . toArray ( ) ; int n = doubleArray . length ; if ( n == 0 ) { throw new IllegalArgumentException ( "The provided collection can't be empty." ) ; } Arrays . sort ( doubleArray ) ; double median ; if ( n % 2 == 0 ) { median = ( doubleArray [ n / 2 - 1 ] + doubleArray [ n / 2 ] ) / 2.0 ; } else { median = doubleArray [ n / 2 ] ; } return median ; }
|
Calculates the median .
|
36,851
|
public static double min ( FlatDataCollection flatDataCollection ) { double min = Double . POSITIVE_INFINITY ; Iterator < Double > it = flatDataCollection . iteratorDouble ( ) ; while ( it . hasNext ( ) ) { Double v = it . next ( ) ; if ( v != null && min > v ) { min = v ; } } return min ; }
|
Calculates Minimum .
|
36,852
|
public static double max ( FlatDataCollection flatDataCollection ) { double max = Double . NEGATIVE_INFINITY ; Iterator < Double > it = flatDataCollection . iteratorDouble ( ) ; while ( it . hasNext ( ) ) { Double v = it . next ( ) ; if ( v != null && max < v ) { max = v ; } } return max ; }
|
Calculates Maximum .
|
36,853
|
public static double minAbsolute ( FlatDataCollection flatDataCollection ) { double minAbs = Double . POSITIVE_INFINITY ; Iterator < Double > it = flatDataCollection . iteratorDouble ( ) ; while ( it . hasNext ( ) ) { Double v = it . next ( ) ; if ( v != null ) { minAbs = Math . min ( minAbs , Math . abs ( v ) ) ; } } return minAbs ; }
|
Calculates Minimum absolute value .
|
36,854
|
public static double maxAbsolute ( FlatDataCollection flatDataCollection ) { double maxAbs = 0.0 ; Iterator < Double > it = flatDataCollection . iteratorDouble ( ) ; while ( it . hasNext ( ) ) { Double v = it . next ( ) ; if ( v != null ) { maxAbs = Math . max ( maxAbs , Math . abs ( v ) ) ; } } return maxAbs ; }
|
Calculates Maximum absolute value .
|
36,855
|
public static double geometricMean ( FlatDataCollection flatDataCollection ) { int n = 0 ; double geometricMean = 0.0 ; Iterator < Double > it = flatDataCollection . iteratorDouble ( ) ; while ( it . hasNext ( ) ) { Double v = it . next ( ) ; if ( v != null ) { if ( v <= 0.0 ) { throw new IllegalArgumentException ( "Negative or zero values are not allowed." ) ; } ++ n ; geometricMean += Math . log ( v ) ; } } geometricMean = Math . exp ( geometricMean / n ) ; return geometricMean ; }
|
Calculates Geometric Mean
|
36,856
|
public static double harmonicMean ( FlatDataCollection flatDataCollection ) { int n = 0 ; double harmonicMean = 0.0 ; Iterator < Double > it = flatDataCollection . iteratorDouble ( ) ; while ( it . hasNext ( ) ) { Double v = it . next ( ) ; if ( v != null ) { ++ n ; harmonicMean += 1.0 / v ; } } harmonicMean = n / harmonicMean ; return harmonicMean ; }
|
Calculates Harmonic Mean
|
36,857
|
public static double variance ( FlatDataCollection flatDataCollection , boolean isSample ) { int n = 0 ; double mean = 0.0 ; double squaredMean = 0.0 ; Iterator < Double > it = flatDataCollection . iteratorDouble ( ) ; while ( it . hasNext ( ) ) { Double v = it . next ( ) ; if ( v != null ) { ++ n ; mean += v ; squaredMean += v * v ; } } if ( n <= 1 ) { throw new IllegalArgumentException ( "The provided collection must have more than 1 elements." ) ; } mean /= n ; squaredMean /= n ; double variance = squaredMean - mean * mean ; if ( isSample ) { variance *= n / ( n - 1.0 ) ; } return variance ; }
|
Calculates the Variance
|
36,858
|
public static double std ( FlatDataCollection flatDataCollection , boolean isSample ) { double variance = variance ( flatDataCollection , isSample ) ; double std = Math . sqrt ( variance ) ; return std ; }
|
Calculates the Standard Deviation
|
36,859
|
public static double cv ( double std , double mean ) { if ( mean == 0 ) { return Double . POSITIVE_INFINITY ; } double cv = std / mean ; return cv ; }
|
Calculates Coefficient of variation
|
36,860
|
public static double moment ( FlatDataCollection flatDataCollection , int r ) { double mean = mean ( flatDataCollection ) ; return moment ( flatDataCollection , r , mean ) ; }
|
Calculates Moment R if the mean is not known .
|
36,861
|
public static double moment ( FlatDataCollection flatDataCollection , int r , double mean ) { int n = 0 ; double moment = 0.0 ; Iterator < Double > it = flatDataCollection . iteratorDouble ( ) ; while ( it . hasNext ( ) ) { Double v = it . next ( ) ; if ( v != null ) { ++ n ; moment += Math . pow ( v - mean , r ) ; } } if ( n <= 1 ) { throw new IllegalArgumentException ( "The provided collection must have more than 1 elements." ) ; } moment /= n ; return moment ; }
|
Calculates Moment R if the mean is known .
|
36,862
|
public static AssociativeArray percentiles ( FlatDataCollection flatDataCollection , int cutPoints ) { double [ ] doubleArray = flatDataCollection . stream ( ) . filter ( x -> x != null ) . mapToDouble ( TypeInference :: toDouble ) . toArray ( ) ; int n = doubleArray . length ; if ( n <= 0 || cutPoints <= 0 || n < cutPoints ) { throw new IllegalArgumentException ( "All the parameters must be positive and n larger than cutPoints." ) ; } Arrays . sort ( doubleArray ) ; AssociativeArray percintiles = new AssociativeArray ( ) ; double counter = 1.0 ; while ( true ) { double perc = counter / cutPoints ; Double tc2 = ( n + 1.0 ) * perc ; int CCk2 = tc2 . intValue ( ) ; int CCk2_plus1 = CCk2 + 1 ; int Ck2 = CCk2 - 1 ; int Ck2_plus1 = CCk2_plus1 - 1 ; double g2Star = tc2 - CCk2 ; if ( Ck2 < doubleArray . length ) { Double key = 100 * perc ; Double Ck2Value = doubleArray [ Ck2 ] ; if ( Ck2_plus1 < doubleArray . length ) { Double Ck2_plus1Value = doubleArray [ Ck2_plus1 ] ; percintiles . put ( key , ( 1 - g2Star ) * Ck2Value + g2Star * Ck2_plus1Value ) ; } else { percintiles . put ( key , Ck2Value ) ; break ; } } else { break ; } ++ counter ; } return percintiles ; }
|
Calculates the percentiles given a number of cutPoints
|
36,863
|
public static double autocorrelation ( FlatDataList flatDataList , int lags ) { int n = count ( flatDataList ) ; if ( n <= 0 || lags <= 0 || n < lags ) { throw new IllegalArgumentException ( "All the parameters must be positive and n larger than lags." ) ; } FlatDataCollection flatDataCollection = flatDataList . toFlatDataCollection ( ) ; double mean = mean ( flatDataCollection ) ; double variance = variance ( flatDataCollection , true ) ; double Ak = 0.0 ; int maxI = n - lags ; for ( int i = 0 ; i < maxI ; ++ i ) { Ak += ( flatDataList . getDouble ( i ) - mean ) * ( flatDataList . getDouble ( i + lags ) - mean ) ; } Ak /= ( n - lags ) ; double autocorrelation = Ak / variance ; return autocorrelation ; }
|
Calculates the autocorrelation of a flatDataCollection for a predifined lag
|
36,864
|
public static AssociativeArray frequencies ( FlatDataCollection flatDataCollection ) { AssociativeArray frequencies = new AssociativeArray ( ) ; for ( Object value : flatDataCollection ) { Object counter = frequencies . get ( value ) ; if ( counter == null ) { frequencies . put ( value , 1 ) ; } else { frequencies . put ( value , ( ( Number ) counter ) . intValue ( ) + 1 ) ; } } return frequencies ; }
|
Calculates the Frequency Table
|
36,865
|
public static void normalize ( AssociativeArray associativeArray ) { double sum = 0.0 ; for ( Map . Entry < Object , Object > entry : associativeArray . entrySet ( ) ) { Double value = TypeInference . toDouble ( entry . getValue ( ) ) ; associativeArray . put ( entry . getKey ( ) , value ) ; sum += value ; } if ( sum != 0.0 ) { for ( Map . Entry < Object , Object > entry : associativeArray . entrySet ( ) ) { associativeArray . put ( entry . getKey ( ) , TypeInference . toDouble ( entry . getValue ( ) ) / sum ) ; } } }
|
Normalizes the provided associative array by dividing its values with the sum of the observations .
|
36,866
|
public static void normalizeExp ( AssociativeArray associativeArray ) { double max = max ( associativeArray . toFlatDataCollection ( ) ) ; double sum = 0.0 ; for ( Map . Entry < Object , Object > entry : associativeArray . entrySet ( ) ) { Double value = Math . exp ( TypeInference . toDouble ( entry . getValue ( ) ) - max ) ; associativeArray . put ( entry . getKey ( ) , value ) ; sum += value ; } if ( sum != 0.0 ) { for ( Map . Entry < Object , Object > entry : associativeArray . entrySet ( ) ) { associativeArray . put ( entry . getKey ( ) , TypeInference . toDouble ( entry . getValue ( ) ) / sum ) ; } } }
|
Normalizes the exponentials of provided associative array by using the log - sum - exp trick .
|
36,867
|
public Map < Integer , String > extract ( final String text ) { Set < String > tmpKwd = new LinkedHashSet < > ( generateTokenizer ( ) . tokenize ( text ) ) ; Map < Integer , String > keywordSequence = new LinkedHashMap < > ( ) ; int position = 0 ; for ( String keyword : tmpKwd ) { keywordSequence . put ( position , keyword ) ; ++ position ; } return keywordSequence ; }
|
This method gets as input a string and returns as output a numbered sequence of the unique tokens . In the returned map as keys we store the position of the word in the original string and as value the actual unique token in that position . Note that the sequence includes only the position of the first occurrence of each word while the next occurrences are ignored .
|
36,868
|
private static void setStorageEngine ( Dataframe dataset ) { if ( storageEngine == null ) { synchronized ( DataframeMatrix . class ) { if ( storageEngine == null ) { String storageName = "mdf" + RandomGenerator . getThreadLocalRandomUnseeded ( ) . nextLong ( ) ; storageEngine = dataset . configuration . getStorageConfiguration ( ) . createStorageEngine ( storageName ) ; } } } }
|
Initializes the static storage engine if it s not already set .
|
36,869
|
public static DataframeMatrix newInstance ( Dataframe dataset , boolean addConstantColumn , Map < Integer , Integer > recordIdsReference , Map < Object , Integer > featureIdsReference ) { if ( ! featureIdsReference . isEmpty ( ) ) { throw new IllegalArgumentException ( "The featureIdsReference map should be empty." ) ; } setStorageEngine ( dataset ) ; int n = dataset . size ( ) ; int d = dataset . xColumnSize ( ) ; if ( addConstantColumn ) { ++ d ; } DataframeMatrix m = new DataframeMatrix ( new MapRealMatrix ( n , d ) , new MapRealVector ( n ) ) ; if ( dataset . isEmpty ( ) ) { return m ; } boolean extractY = ( dataset . getYDataType ( ) == TypeInference . DataType . NUMERICAL ) ; int featureId = 0 ; if ( addConstantColumn ) { for ( int row = 0 ; row < n ; ++ row ) { m . X . setEntry ( row , featureId , 1.0 ) ; } featureIdsReference . put ( Dataframe . COLUMN_NAME_CONSTANT , featureId ) ; ++ featureId ; } int rowId = 0 ; for ( Map . Entry < Integer , Record > e : dataset . entries ( ) ) { Integer rId = e . getKey ( ) ; Record r = e . getValue ( ) ; if ( recordIdsReference != null ) { recordIdsReference . put ( rId , rowId ) ; } if ( extractY ) { m . Y . setEntry ( rowId , TypeInference . toDouble ( r . getY ( ) ) ) ; } for ( Map . Entry < Object , Object > entry : r . getX ( ) . entrySet ( ) ) { Object feature = entry . getKey ( ) ; Integer knownFeatureId = featureIdsReference . get ( feature ) ; if ( knownFeatureId == null ) { featureIdsReference . put ( feature , featureId ) ; knownFeatureId = featureId ; ++ featureId ; } Double value = TypeInference . toDouble ( entry . getValue ( ) ) ; if ( value != null ) { m . X . setEntry ( rowId , knownFeatureId , value ) ; } } ++ rowId ; } return m ; }
|
Method used to generate a training Dataframe to a DataframeMatrix and extracts its contents to Matrixes . It populates the featureIdsReference map with the mappings between the feature names and the column ids of the matrix . Typically used to convert the training dataset .
|
36,870
|
public static RealVector parseRecord ( Record r , Map < Object , Integer > featureIdsReference ) { if ( featureIdsReference . isEmpty ( ) ) { throw new IllegalArgumentException ( "The featureIdsReference map should not be empty." ) ; } int d = featureIdsReference . size ( ) ; RealVector v = ( storageEngine != null ) ? new MapRealVector ( d ) : new OpenMapRealVector ( d ) ; boolean addConstantColumn = featureIdsReference . containsKey ( Dataframe . COLUMN_NAME_CONSTANT ) ; if ( addConstantColumn ) { v . setEntry ( 0 , 1.0 ) ; } for ( Map . Entry < Object , Object > entry : r . getX ( ) . entrySet ( ) ) { Object feature = entry . getKey ( ) ; Double value = TypeInference . toDouble ( entry . getValue ( ) ) ; if ( value != null ) { Integer featureId = featureIdsReference . get ( feature ) ; if ( featureId != null ) { v . setEntry ( featureId , value ) ; } } else { } } return v ; }
|
Parses a single Record and converts it to RealVector by using an already existing mapping between feature names and column ids .
|
36,871
|
public void setMaxNumberOfThreadsPerTask ( Integer maxNumberOfThreadsPerTask ) { if ( maxNumberOfThreadsPerTask < 0 ) { throw new IllegalArgumentException ( "The max number of threads can not be negative." ) ; } else if ( maxNumberOfThreadsPerTask == 0 ) { this . maxNumberOfThreadsPerTask = AVAILABLE_PROCESSORS ; } else { this . maxNumberOfThreadsPerTask = Math . min ( maxNumberOfThreadsPerTask , 4 * AVAILABLE_PROCESSORS ) ; } }
|
Setter for the maximum number of threads which can be used for a specific task by the framework . By convention if the value is 0 the max number of threads is set equal to the available processors .
|
36,872
|
public < T > void forEach ( Stream < T > stream , Consumer < ? super T > action ) { Runnable runnable = ( ) -> stream . forEach ( action ) ; ThreadMethods . forkJoinExecution ( runnable , concurrencyConfiguration , stream . isParallel ( ) ) ; }
|
Executes forEach on the provided stream . If the Stream is parallel it is executed using the custom pool else it is executed directly from the main thread .
|
36,873
|
public < T , R > Stream < R > map ( Stream < T > stream , Function < ? super T , ? extends R > mapper ) { Callable < Stream < R > > callable = ( ) -> stream . map ( mapper ) ; return ThreadMethods . forkJoinExecution ( callable , concurrencyConfiguration , stream . isParallel ( ) ) ; }
|
Executes map on the provided stream . If the Stream is parallel it is executed using the custom pool else it is executed directly from the main thread .
|
36,874
|
public < T , R , A > R collect ( Stream < T > stream , Collector < ? super T , A , R > collector ) { Callable < R > callable = ( ) -> stream . collect ( collector ) ; return ThreadMethods . forkJoinExecution ( callable , concurrencyConfiguration , stream . isParallel ( ) ) ; }
|
Executes collect on the provided stream using the provided collector . If the Stream is parallel it is executed using the custom pool else it is executed directly from the main thread .
|
36,875
|
public < T > Optional < T > min ( Stream < T > stream , Comparator < ? super T > comparator ) { Callable < Optional < T > > callable = ( ) -> stream . min ( comparator ) ; return ThreadMethods . forkJoinExecution ( callable , concurrencyConfiguration , stream . isParallel ( ) ) ; }
|
Executes min on the provided stream using the provided collector . If the Stream is parallel it is executed using the custom pool else it is executed directly from the main thread .
|
36,876
|
public double sum ( DoubleStream stream ) { Callable < Double > callable = ( ) -> stream . sum ( ) ; return ThreadMethods . forkJoinExecution ( callable , concurrencyConfiguration , stream . isParallel ( ) ) ; }
|
Executes sum on the provided DoubleStream using the provided collector . If the Stream is parallel it is executed using the custom pool else it is executed directly from the main thread .
|
36,877
|
public void save ( String storageName ) { storageEngine . saveObject ( "modelParameters" , modelParameters ) ; storageEngine . saveObject ( "trainingParameters" , trainingParameters ) ; storageEngine . rename ( storageName ) ; modelParameters = ( MP ) storageEngine . loadObject ( "modelParameters" , ModelParameters . class ) ; }
|
Saves the KnowledgeBase using the storage engine .
|
36,878
|
protected AbstractTokenizer generateTokenizer ( ) { Class < ? extends AbstractTokenizer > tokenizer = parameters . getTokenizer ( ) ; if ( tokenizer == null ) { return null ; } try { return tokenizer . newInstance ( ) ; } catch ( InstantiationException | IllegalAccessException ex ) { throw new RuntimeException ( ex ) ; } }
|
Generates a new AbstractTokenizer object by using the provided tokenizer class .
|
36,879
|
public static < T extends AbstractTextExtractor , TP extends AbstractTextExtractor . AbstractParameters > T newInstance ( TP parameters ) { try { Class < T > tClass = ( Class < T > ) parameters . getClass ( ) . getEnclosingClass ( ) ; return tClass . getConstructor ( parameters . getClass ( ) ) . newInstance ( parameters ) ; } catch ( InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | SecurityException | NoSuchMethodException ex ) { throw new RuntimeException ( ex ) ; } }
|
Generates a new instance of a AbstractTextExtractor by providing the Class of the AbstractTextExtractor .
|
36,880
|
public static AssociativeArray2D survivalFunction ( FlatDataCollection flatDataCollection ) { AssociativeArray2D survivalFunction = new AssociativeArray2D ( ) ; Queue < Double > censoredData = new PriorityQueue < > ( ) ; Queue < Double > uncensoredData = new PriorityQueue < > ( ) ; int n = flatDataCollection . size ( ) ; if ( n == 0 ) { throw new IllegalArgumentException ( "The provided collection can't be empty." ) ; } for ( Object value : flatDataCollection ) { String str = value . toString ( ) ; if ( str . endsWith ( CENSORED_NUMBER_POSTFIX ) ) { censoredData . add ( Double . valueOf ( str . substring ( 0 , str . length ( ) - CENSORED_NUMBER_POSTFIX . length ( ) ) ) ) ; } else { uncensoredData . add ( TypeInference . toDouble ( value ) ) ; } } Double currentCensored = null ; Double currentUncensored = null ; int i = 1 ; double previousUncensoredValue = 1.0 ; double varianceDenominator = 0.0 ; do { if ( currentCensored == null ) { currentCensored = censoredData . poll ( ) ; } if ( currentUncensored == null ) { currentUncensored = uncensoredData . poll ( ) ; } boolean isCensored = false ; String key ; if ( currentUncensored == null ) { key = currentCensored . toString ( ) . concat ( CENSORED_NUMBER_POSTFIX ) ; currentCensored = null ; isCensored = true ; } else if ( currentCensored == null ) { key = currentUncensored . toString ( ) ; currentUncensored = null ; } else if ( currentCensored < currentUncensored ) { key = currentCensored . toString ( ) . concat ( CENSORED_NUMBER_POSTFIX ) ; currentCensored = null ; isCensored = true ; } else { key = currentUncensored . toString ( ) ; currentUncensored = null ; } Integer previousMi = ( Integer ) survivalFunction . get2d ( key , "mi" ) ; if ( previousMi == null ) { previousMi = 0 ; } survivalFunction . put2d ( key , "i" , i ) ; survivalFunction . put2d ( key , "mi" , previousMi + 1 ) ; if ( isCensored == false ) { survivalFunction . put2d ( key , "r" , i ) ; double Sti = ( n - i ) / ( n - i + 1.0 ) * previousUncensoredValue ; if ( n - i > 0 ) { varianceDenominator += 1.0 / ( ( n - i ) * ( n - i + 1.0 ) ) ; } survivalFunction . put2d ( key , "Sti" , Sti ) ; survivalFunction . put2d ( key , "varianceSti" , Sti * Sti * varianceDenominator ) ; previousUncensoredValue = Sti ; } ++ i ; } while ( currentCensored != null || currentUncensored != null || ! censoredData . isEmpty ( ) || ! uncensoredData . isEmpty ( ) ) ; return survivalFunction ; }
|
Calculates the survivalFunction by processing the flatDataCollection with the censored internalData . The flatDataCollection contains numbers in string format . The Censored entries contain a + symbol at the end of the number .
|
36,881
|
public static double median ( AssociativeArray2D survivalFunction ) { Double ApointTi = null ; Double BpointTi = null ; int n = survivalFunction . size ( ) ; if ( n == 0 ) { throw new IllegalArgumentException ( "The provided collection can't be empty." ) ; } for ( Map . Entry < Object , AssociativeArray > entry : survivalFunction . entrySet ( ) ) { Object ti = entry . getKey ( ) ; AssociativeArray row = entry . getValue ( ) ; Double Sti = row . getDouble ( "Sti" ) ; if ( Sti == null ) { continue ; } Double point = Double . valueOf ( ti . toString ( ) ) ; if ( Math . abs ( Sti - 0.5 ) < 0.0000001 ) { return point ; } else if ( Sti > 0.5 ) { ApointTi = point ; } else { BpointTi = point ; break ; } } if ( n == 1 ) { return ( ApointTi != null ) ? ApointTi : BpointTi ; } else if ( ApointTi == null || BpointTi == null ) { throw new IllegalArgumentException ( "Invalid A and B points." ) ; } double ApointTiValue = TypeInference . toDouble ( survivalFunction . get2d ( ApointTi . toString ( ) , "Sti" ) ) ; double BpointTiValue = TypeInference . toDouble ( survivalFunction . get2d ( BpointTi . toString ( ) , "Sti" ) ) ; double median = BpointTi - ( BpointTiValue - 0.5 ) * ( BpointTi - ApointTi ) / ( BpointTiValue - ApointTiValue ) ; return median ; }
|
Calculates median .
|
36,882
|
private static double ar ( AssociativeArray2D survivalFunction , int r ) { if ( survivalFunction . isEmpty ( ) ) { throw new IllegalArgumentException ( "The provided collection can't be empty." ) ; } AssociativeArray2D survivalFunctionCopy = survivalFunction ; Map . Entry < Object , AssociativeArray > lastRowEntry = null ; for ( Map . Entry < Object , AssociativeArray > currentRowEntry : survivalFunction . entrySet ( ) ) { lastRowEntry = currentRowEntry ; } if ( lastRowEntry == null ) { throw new IllegalArgumentException ( "The last observation can't be censored." ) ; } AssociativeArray lastRow = lastRowEntry . getValue ( ) ; if ( lastRow . get ( "Sti" ) == null ) { survivalFunctionCopy = survivalFunction . copy ( ) ; Object lastRowKey = lastRowEntry . getKey ( ) ; AssociativeArray lastRowValue = survivalFunctionCopy . remove ( lastRowKey ) ; String str = lastRowKey . toString ( ) ; Double newLastRowKey = Double . valueOf ( str . substring ( 0 , str . length ( ) - CENSORED_NUMBER_POSTFIX . length ( ) ) ) ; newLastRowKey = Math . floor ( newLastRowKey ) + 1 ; survivalFunctionCopy . put2d ( newLastRowKey , "i" , lastRowValue . get ( "i" ) ) ; survivalFunctionCopy . put2d ( newLastRowKey , "r" , lastRowValue . get ( "i" ) ) ; survivalFunctionCopy . put2d ( newLastRowKey , "Sti" , 0.0 ) ; survivalFunctionCopy . put2d ( newLastRowKey , "varianceSti" , 0.0 ) ; } double Ar = 0.0 ; double StiPrevious = 1 ; double tiPrevious = 0 ; for ( Map . Entry < Object , AssociativeArray > entry : survivalFunctionCopy . entrySet ( ) ) { Object ti = entry . getKey ( ) ; AssociativeArray row = entry . getValue ( ) ; Double Sti = row . getDouble ( "Sti" ) ; if ( Sti == null ) { continue ; } double tiCurrent = Double . valueOf ( ti . toString ( ) ) ; if ( row . getDouble ( "r" ) > r ) { Ar += StiPrevious * ( tiCurrent - tiPrevious ) ; } StiPrevious = Sti ; tiPrevious = tiCurrent ; } return Ar ; }
|
Ar function used to estimate mean and variance .
|
36,883
|
public static double meanVariance ( AssociativeArray2D survivalFunction ) { double meanVariance = 0 ; int m = 0 ; int n = 0 ; for ( Map . Entry < Object , AssociativeArray > entry : survivalFunction . entrySet ( ) ) { AssociativeArray row = entry . getValue ( ) ; Number mi = ( Number ) row . get ( "mi" ) ; n += mi . intValue ( ) ; if ( row . get ( "Sti" ) == null ) { m += mi . intValue ( ) ; } } for ( Map . Entry < Object , AssociativeArray > entry : survivalFunction . entrySet ( ) ) { AssociativeArray row = entry . getValue ( ) ; if ( row . get ( "Sti" ) == null ) { continue ; } Number mi = ( Number ) row . get ( "mi" ) ; Number r = ( Number ) row . get ( "r" ) ; double Ar = ar ( survivalFunction , r . intValue ( ) ) ; if ( n - r . intValue ( ) > 0 ) { meanVariance += mi . intValue ( ) * ( Ar * Ar ) / ( ( n - r . intValue ( ) ) * ( n - r . intValue ( ) + 1.0 ) ) ; } } meanVariance *= m / ( m - 1.0 ) ; return meanVariance ; }
|
Calculates the Variance of Mean .
|
36,884
|
public VM validate ( Iterator < Split > dataSplits , TrainingParameters trainingParameters ) { AbstractModeler modeler = MLBuilder . create ( trainingParameters , configuration ) ; List < VM > validationMetricsList = new LinkedList < > ( ) ; while ( dataSplits . hasNext ( ) ) { Split s = dataSplits . next ( ) ; Dataframe trainData = s . getTrain ( ) ; Dataframe testData = s . getTest ( ) ; modeler . fit ( trainData ) ; trainData . close ( ) ; modeler . predict ( testData ) ; VM entrySample = ValidationMetrics . newInstance ( vmClass , testData ) ; testData . close ( ) ; validationMetricsList . add ( entrySample ) ; } modeler . close ( ) ; VM avgValidationMetrics = ValidationMetrics . newInstance ( vmClass , validationMetricsList ) ; return avgValidationMetrics ; }
|
Estimates the average validation metrics on the provided data splits .
|
36,885
|
public static < K > void updateWeights ( double l1 , double learningRate , Map < K , Double > weights , Map < K , Double > newWeights ) { if ( l1 > 0.0 ) { for ( Map . Entry < K , Double > e : newWeights . entrySet ( ) ) { K column = e . getKey ( ) ; double wi_k_intermediate = e . getValue ( ) ; if ( wi_k_intermediate > 0.0 ) { newWeights . put ( column , Math . max ( 0.0 , wi_k_intermediate - l1 * wi_k_intermediate ) ) ; } else if ( wi_k_intermediate < 0.0 ) { newWeights . put ( column , Math . min ( 0.0 , wi_k_intermediate + l1 * wi_k_intermediate ) ) ; } } } }
|
Updates the weights by applying the L1 regularization .
|
36,886
|
public static < K > double estimatePenalty ( double l1 , Map < K , Double > weights ) { double penalty = 0.0 ; if ( l1 > 0.0 ) { double sumAbsWeights = 0.0 ; for ( double w : weights . values ( ) ) { sumAbsWeights += Math . abs ( w ) ; } penalty = l1 * sumAbsWeights ; } return penalty ; }
|
Estimates the penalty by adding the L1 regularization .
|
36,887
|
public static DataType getDataType ( Object v ) { if ( DataType . BOOLEAN . isInstance ( v ) ) { return DataType . BOOLEAN ; } else if ( DataType . ORDINAL . isInstance ( v ) ) { return DataType . ORDINAL ; } else if ( DataType . NUMERICAL . isInstance ( v ) ) { return DataType . NUMERICAL ; } else if ( DataType . CATEGORICAL . isInstance ( v ) ) { return DataType . CATEGORICAL ; } else { return null ; } }
|
Detects the DataType of a particular value .
|
36,888
|
public static Double toDouble ( Object v ) { if ( v == null ) { return null ; } if ( v instanceof Boolean ) { return ( ( Boolean ) v ) ? 1.0 : 0.0 ; } return ( ( Number ) v ) . doubleValue ( ) ; }
|
Converts safely any Number to Double .
|
36,889
|
public static Integer toInteger ( Object v ) { if ( v == null ) { return null ; } if ( v instanceof Boolean ) { return ( ( Boolean ) v ) ? 1 : 0 ; } return ( ( Number ) v ) . intValue ( ) ; }
|
Converts safely any Number to Integer .
|
36,890
|
public static double calculateScore ( FlatDataList errorList ) { double DWdeltasquare = 0 ; double DWetsquare = 0 ; int n = errorList . size ( ) ; for ( int i = 0 ; i < n ; ++ i ) { Double error = errorList . getDouble ( i ) ; if ( i >= 1 ) { Double errorPrevious = errorList . getDouble ( i - 1 ) ; if ( errorPrevious != null ) { DWdeltasquare += Math . pow ( error - errorPrevious , 2 ) ; } } DWetsquare += error * error ; } double DW = DWdeltasquare / DWetsquare ; return DW ; }
|
Calculates DW score
|
36,891
|
public final Iterator < Double > iteratorDouble ( ) { return new Iterator < Double > ( ) { private final Iterator < Object > objectIterator = ( Iterator < Object > ) internalData . iterator ( ) ; public boolean hasNext ( ) { return objectIterator . hasNext ( ) ; } public Double next ( ) { return TypeInference . toDouble ( objectIterator . next ( ) ) ; } public void remove ( ) { objectIterator . remove ( ) ; } } ; }
|
Iterator which casts the values of the Data Structure from Object to Double . This iterator should be used only when the underling Data Structure contains Numeric or Boolean values . Accessing this iterator when other data types are stored will lead to an Exception .
|
36,892
|
public static double euclidean ( AssociativeArray a1 , AssociativeArray a2 ) { Map < Object , Double > columnDistances = columnDistances ( a1 , a2 , null ) ; double distance = 0.0 ; for ( double columnDistance : columnDistances . values ( ) ) { distance += ( columnDistance * columnDistance ) ; } return Math . sqrt ( distance ) ; }
|
Estimates the euclidean distance of two Associative Arrays .
|
36,893
|
public static double euclideanWeighted ( AssociativeArray a1 , AssociativeArray a2 , Map < Object , Double > columnWeights ) { Map < Object , Double > columnDistances = columnDistances ( a1 , a2 , columnWeights . keySet ( ) ) ; double distance = 0.0 ; for ( Map . Entry < Object , Double > entry : columnDistances . entrySet ( ) ) { double columnDistance = entry . getValue ( ) ; distance += ( columnDistance * columnDistance ) * columnWeights . get ( entry . getKey ( ) ) ; } return Math . sqrt ( distance ) ; }
|
Estimates the weighted euclidean distance of two Associative Arrays .
|
36,894
|
public static double manhattan ( AssociativeArray a1 , AssociativeArray a2 ) { Map < Object , Double > columnDistances = columnDistances ( a1 , a2 , null ) ; double distance = 0.0 ; for ( double columnDistance : columnDistances . values ( ) ) { distance += Math . abs ( columnDistance ) ; } return distance ; }
|
Estimates the manhattan distance of two Associative Arrays .
|
36,895
|
public static double manhattanWeighted ( AssociativeArray a1 , AssociativeArray a2 , Map < Object , Double > columnWeights ) { Map < Object , Double > columnDistances = columnDistances ( a1 , a2 , columnWeights . keySet ( ) ) ; double distance = 0.0 ; for ( Map . Entry < Object , Double > entry : columnDistances . entrySet ( ) ) { distance += Math . abs ( entry . getValue ( ) ) * columnWeights . get ( entry . getKey ( ) ) ; } return distance ; }
|
Estimates the weighted manhattan distance of two Associative Arrays .
|
36,896
|
public static AssociativeArray getRanksFromValues ( FlatDataList flatDataCollection ) { AssociativeArray tiesCounter = new AssociativeArray ( ) ; Map < Object , Double > key2AvgRank = new LinkedHashMap < > ( ) ; _buildRankArrays ( flatDataCollection , tiesCounter , key2AvgRank ) ; int i = 0 ; for ( Object value : flatDataCollection ) { flatDataCollection . set ( i ++ , key2AvgRank . get ( value ) ) ; } return tiesCounter ; }
|
Replaces the actual values of the flatDataCollection with their ranks and returns in the tieCounter the keys that occur more than once and the number of occurrences . The tieCounter does not store the list and ranks of the actual ties because we never use them .
|
36,897
|
public static AssociativeArray getRanksFromValues ( AssociativeArray associativeArray ) { AssociativeArray tiesCounter = new AssociativeArray ( ) ; Map < Object , Double > key2AvgRank = new LinkedHashMap < > ( ) ; _buildRankArrays ( associativeArray . toFlatDataList ( ) , tiesCounter , key2AvgRank ) ; for ( Map . Entry < Object , Object > entry : associativeArray . entrySet ( ) ) { associativeArray . put ( entry . getKey ( ) , key2AvgRank . get ( entry . getValue ( ) ) ) ; } return tiesCounter ; }
|
Replaces the actual values of the associativeArray with their ranks and returns in the tieCounter the keys that occur more than once and the number of occurrences . The tieCounter does not store the list and ranks of the actual ties because we never use them .
|
36,898
|
public void fit ( Map < Object , URI > datasets ) { TrainingParameters tp = ( TrainingParameters ) knowledgeBase . getTrainingParameters ( ) ; Dataframe trainingData = Dataframe . Builder . parseTextFiles ( datasets , AbstractTextExtractor . newInstance ( tp . getTextExtractorParameters ( ) ) , knowledgeBase . getConfiguration ( ) ) ; fit ( trainingData ) ; trainingData . close ( ) ; }
|
Trains a Machine Learning modeler using the provided dataset files . The data map should have as index the names of each class and as values the URIs of the training files . The training files should contain one training example per row .
|
36,899
|
public Dataframe predict ( URI datasetURI ) { Map < Object , URI > dataset = new HashMap < > ( ) ; dataset . put ( null , datasetURI ) ; TrainingParameters trainingParameters = ( TrainingParameters ) knowledgeBase . getTrainingParameters ( ) ; Dataframe testDataset = Dataframe . Builder . parseTextFiles ( dataset , AbstractTextExtractor . newInstance ( trainingParameters . getTextExtractorParameters ( ) ) , knowledgeBase . getConfiguration ( ) ) ; predict ( testDataset ) ; return testDataset ; }
|
Generates a Dataframe with the predictions for the provided data file . The data file should contain the text of one observation per row .
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.