| idx int64 0–41.2k | question stringlengths 73–5.81k | target stringlengths 5–918 |
|---|---|---|
2,800
|
private static void invokeMatlab ( File dataMatrixFile , File affMatrixFile , int dimensions , File outputFile ) throws IOException { String commandLine = "matlab -nodisplay -nosplash -nojvm" ; LOGGER . fine ( commandLine ) ; Process matlab = Runtime . getRuntime ( ) . exec ( commandLine ) ; String outputStr = "save " + outputFile . getAbsolutePath ( ) + " projection -ASCII\n" ; String matlabProgram = String . format ( SR_LPP_M , dataMatrixFile . getAbsolutePath ( ) , affMatrixFile . getAbsolutePath ( ) , dimensions , outputStr ) ; PrintWriter stdin = new PrintWriter ( matlab . getOutputStream ( ) ) ; BufferedReader stdout = new BufferedReader ( new InputStreamReader ( matlab . getInputStream ( ) ) ) ; BufferedReader stderr = new BufferedReader ( new InputStreamReader ( matlab . getErrorStream ( ) ) ) ; stdin . println ( matlabProgram ) ; stdin . close ( ) ; StringBuilder output = new StringBuilder ( "Matlab LPP output:\n" ) ; for ( String line = null ; ( line = stdout . readLine ( ) ) != null ; ) { output . append ( line ) . append ( "\n" ) ; if ( line . equals ( "Finished" ) ) { matlab . destroy ( ) ; } } LOGGER . fine ( output . toString ( ) ) ; int exitStatus = - 1 ; try { exitStatus = matlab . waitFor ( ) ; } catch ( InterruptedException ie ) { throw new Error ( ie ) ; } LOGGER . fine ( "Matlab LPP exit status: " + exitStatus ) ; if ( exitStatus != 0 ) { StringBuilder sb = new StringBuilder ( ) ; for ( String line = null ; ( line = stderr . readLine ( ) ) != null ; ) { sb . append ( line ) . append ( "\n" ) ; } throw new IllegalStateException ( "Matlab LPP did not finish normally: " + sb ) ; } }
|
Invokes Matlab to run the LPP script
|
2,801
|
private static void invokeOctave ( File dataMatrixFile , File affMatrixFile , int dimensions , File outputFile ) throws IOException { File octaveFile = File . createTempFile ( "octave-LPP" , ".m" ) ; String outputStr = "save(\"-ascii\", \"" + outputFile . getAbsolutePath ( ) + "\", \"projection\");\n" ; String octaveProgram = null ; try { octaveProgram = String . format ( SR_LPP_M , dataMatrixFile . getAbsolutePath ( ) , affMatrixFile . getAbsolutePath ( ) , dimensions , outputStr ) ; } catch ( Throwable t ) { t . printStackTrace ( ) ; } PrintWriter pw = new PrintWriter ( octaveFile ) ; pw . println ( octaveProgram ) ; pw . close ( ) ; String commandLine = "octave " + octaveFile . getAbsolutePath ( ) ; LOGGER . fine ( commandLine ) ; Process octave = Runtime . getRuntime ( ) . exec ( commandLine ) ; BufferedReader stdout = new BufferedReader ( new InputStreamReader ( octave . getInputStream ( ) ) ) ; BufferedReader stderr = new BufferedReader ( new InputStreamReader ( octave . getErrorStream ( ) ) ) ; StringBuilder output = new StringBuilder ( "Octave LPP output:\n" ) ; for ( String line = null ; ( line = stdout . readLine ( ) ) != null ; ) { output . append ( line ) . append ( "\n" ) ; } LOGGER . fine ( output . toString ( ) ) ; int exitStatus = - 1 ; try { exitStatus = octave . waitFor ( ) ; } catch ( InterruptedException ie ) { throw new Error ( ie ) ; } LOGGER . fine ( "Octave LPP exit status: " + exitStatus ) ; if ( exitStatus != 0 ) { StringBuilder sb = new StringBuilder ( ) ; for ( String line = null ; ( line = stderr . readLine ( ) ) != null ; ) { sb . append ( line ) . append ( "\n" ) ; } throw new IllegalStateException ( "Octave LPP did not finish normally: " + sb ) ; } }
|
Invokes Octave to run the LPP script
|
2,802
|
public static void setLevel ( Level outputLevel ) { Logger appRootLogger = Logger . getLogger ( "edu.ucla.sspace" ) ; Handler verboseHandler = new ConsoleHandler ( ) ; verboseHandler . setLevel ( outputLevel ) ; appRootLogger . addHandler ( verboseHandler ) ; appRootLogger . setLevel ( outputLevel ) ; appRootLogger . setUseParentHandlers ( false ) ; }
|
Sets the output level of the S - Space package according to the desired level .
|
2,803
|
private void advance ( ) { try { while ( true ) { if ( curLine == null || ! matcher . find ( ) ) { String line = br . readLine ( ) ; if ( line == null ) { next = null ; br . close ( ) ; return ; } matcher = notWhiteSpace . matcher ( line ) ; curLine = line ; if ( ! matcher . find ( ) ) continue ; } next = curLine . substring ( matcher . start ( ) , matcher . end ( ) ) ; break ; } } catch ( IOException ioe ) { throw new IOError ( ioe ) ; } }
|
Advances to the next word in the buffer .
|
2,804
|
private void checkIndices ( int row , int col , boolean expand ) { if ( row < 0 || col < 0 ) { throw new ArrayIndexOutOfBoundsException ( ) ; } if ( expand ) { int r = row + 1 ; int cur = 0 ; while ( r > ( cur = rows . get ( ) ) && ! rows . compareAndSet ( cur , r ) ) ; int c = col + 1 ; cur = 0 ; while ( c > ( cur = cols . get ( ) ) && ! cols . compareAndSet ( cur , c ) ) ; } }
|
Verify that the given row and column values are non - negative and optionally expand the size of the matrix if the row or column is outside the current bounds .
|
2,805
|
private static int index ( Object o ) { Integer i = TYPE_INDICES . get ( o ) ; if ( i == null ) { synchronized ( TYPE_INDICES ) { i = TYPE_INDICES . get ( o ) ; if ( i != null ) return i ; else { int j = TYPE_INDICES . size ( ) ; TYPE_INDICES . put ( o , j ) ; TYPES . add ( o ) ; return j ; } } } return i ; }
|
Returns the index for the given type creating a new index if necessary
|
2,806
|
private void addThread ( ) { Thread t = new WorkerThread ( workQueue ) ; threads . add ( t ) ; t . start ( ) ; }
|
Increases the number of concurrently processing threads by one .
|
2,807
|
public long getRemainingTasks ( Object taskGroupId ) { CountDownLatch latch = taskKeyToLatch . get ( taskGroupId ) ; return ( latch == null ) ? 0 : latch . getCount ( ) ; }
|
Returns the number of tasks that need to be completed before the group associated with the key is complete . Note that this number includes both those tasks running and not yet completed as well as tasks that have yet to be enqueued on behalf of this id .
|
2,808
|
public Object registerTaskGroup ( int numTasks ) { Object key = new Object ( ) ; taskKeyToLatch . putIfAbsent ( key , new CountDownLatch ( numTasks ) ) ; return key ; }
|
Registers a new task group with the specified number of tasks to execute and returns a task group identifier to use when registering its tasks .
|
2,809
|
public void run ( Collection < Runnable > tasks ) { int numTasks = tasks . size ( ) ; CountDownLatch latch = new CountDownLatch ( numTasks ) ; for ( Runnable r : tasks ) { if ( r == null ) throw new NullPointerException ( "Cannot run null tasks" ) ; workQueue . offer ( new CountingRunnable ( r , latch ) ) ; } try { latch . await ( ) ; } catch ( InterruptedException ie ) { throw new IllegalStateException ( "Not all tasks finished" , ie ) ; } }
|
Executes the tasks using a thread pool and returns once all tasks have finished .
|
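The rows above (together with the add and await calls that appear in calculateEdgeSimMatrix further down) sketch a latch-based task-group API. A minimal usage sketch, assuming a `WorkQueue` class with exactly these methods and a no-arg constructor (both assumptions, since the excerpts show only method bodies):

```java
import java.util.Arrays;

public class WorkQueueDemo {
    public static void main(String[] args) {
        WorkQueue workQueue = new WorkQueue(); // assumed constructor

        // Group-based usage: register a fixed number of tasks, enqueue them
        // under the returned key, then block until the group's latch opens.
        Object key = workQueue.registerTaskGroup(2);
        workQueue.add(key, () -> System.out.println("task 1"));
        workQueue.add(key, () -> System.out.println("task 2"));
        System.out.println("remaining: " + workQueue.getRemainingTasks(key));
        workQueue.await(key);

        // Bulk usage: run() returns only after every task has finished.
        workQueue.run(Arrays.<Runnable>asList(
            () -> System.out.println("a"),
            () -> System.out.println("b")));
    }
}
```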
2,810
|
@ SuppressWarnings ( "unchecked" ) public static < T > T getObjectInstance ( String className ) { try { Class clazz = Class . forName ( className ) ; return ( T ) clazz . newInstance ( ) ; } catch ( Exception e ) { throw new Error ( e ) ; } }
|
Returns an arbitrary object instance based on a class name .
|
2,811
|
public static < T extends Vector > double getSimilarity ( SimType similarityType , T a , T b ) { switch ( similarityType ) { case COSINE : return cosineSimilarity ( a , b ) ; case PEARSON_CORRELATION : return correlation ( a , b ) ; case EUCLIDEAN : return euclideanSimilarity ( a , b ) ; case SPEARMAN_RANK_CORRELATION : return spearmanRankCorrelationCoefficient ( a , b ) ; case JACCARD_INDEX : return jaccardIndex ( a , b ) ; case AVERAGE_COMMON_FEATURE_RANK : return averageCommonFeatureRank ( a , b ) ; case LIN : return linSimilarity ( a , b ) ; case KL_DIVERGENCE : return klDivergence ( a , b ) ; case KENDALLS_TAU : return kendallsTau ( a , b ) ; case TANIMOTO_COEFFICIENT : return tanimotoCoefficient ( a , b ) ; } return 0 ; }
|
Calculates the similarity of the two vectors using the provided similarity measure .
|
2,812
|
public static double cosineSimilarity ( double [ ] a , double [ ] b ) { check ( a , b ) ; double dotProduct = 0.0 ; double aMagnitude = 0.0 ; double bMagnitude = 0.0 ; for ( int i = 0 ; i < b . length ; i ++ ) { double aValue = a [ i ] ; double bValue = b [ i ] ; aMagnitude += aValue * aValue ; bMagnitude += bValue * bValue ; dotProduct += aValue * bValue ; } aMagnitude = Math . sqrt ( aMagnitude ) ; bMagnitude = Math . sqrt ( bMagnitude ) ; return ( aMagnitude == 0 || bMagnitude == 0 ) ? 0 : dotProduct / ( aMagnitude * bMagnitude ) ; }
|
Returns the cosine similarity of the two arrays .
|
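For reference, the quantity computed above is

$$ \cos(a, b) \;=\; \frac{\sum_i a_i b_i}{\sqrt{\sum_i a_i^2}\,\sqrt{\sum_i b_i^2}} $$

with the convention that a zero-magnitude vector yields a similarity of 0 rather than NaN.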
2,813
|
public static double spearmanRankCorrelationCoefficient ( double [ ] a , double [ ] b ) { check ( a , b ) ; int N = a . length ; int NcubedMinusN = ( N * N * N ) - N ; double [ ] rankedA = rank ( a ) ; double [ ] rankedB = rank ( b ) ; double sumDiffs = 0 ; for ( int i = 0 ; i < rankedA . length - 1 ; ++ i ) { double diff = rankedA [ i ] - rankedB [ i ] ; sumDiffs += diff * diff ; } double aCorrectionFactor = rankedA [ rankedA . length - 1 ] ; double bCorrectionFactor = rankedB [ rankedB . length - 1 ] ; double tiesSum = aCorrectionFactor + bCorrectionFactor ; return ( NcubedMinusN - ( 6 * sumDiffs ) - ( ( tiesSum ) / 2d ) ) / Math . sqrt ( ( NcubedMinusN * NcubedMinusN ) - ( tiesSum * NcubedMinusN ) + ( aCorrectionFactor * bCorrectionFactor ) ) ; }
|
Computes the Spearman rank correlation coefficient for the two arrays .
|
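The expression above is the tie-corrected form of Spearman's coefficient, assuming (as the indexing suggests) that rank ( ) returns the ranks with a tie-correction term $T = \sum (t^3 - t)$, summed over groups of $t$ tied values, appended as the final array element. With $X = N^3 - N$ and $d_i$ the rank difference for element $i$:

$$ r_s \;=\; \frac{X - 6\sum_i d_i^2 - \tfrac{1}{2}\,(T_a + T_b)}{\sqrt{\,(X - T_a)\,(X - T_b)\,}} $$

The code's denominator $\sqrt{X^2 - (T_a + T_b)X + T_a T_b}$ is the same product in expanded form.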
2,814
|
public Set < T > types ( ) { Set < T > types = new HashSet < T > ( ) ; for ( Object o : edges . values ( ) ) { Set < T > s = ( Set < T > ) o ; types . addAll ( s ) ; } return types ; }
|
Returns the set of types contained within this set
|
2,815
|
public static double mean ( Collection < ? extends Number > values ) { double sum = 0d ; for ( Number n : values ) sum += n . doubleValue ( ) ; return sum / values . size ( ) ; }
|
Returns the mean value of the collection of numbers
|
2,816
|
public static double mean ( int [ ] values ) { double sum = 0d ; for ( int i : values ) sum += i ; return sum / values . length ; }
|
Returns the mean value of the array of ints
|
2,817
|
@ SuppressWarnings ( "unchecked" ) public static < T extends Number & Comparable > T median ( Collection < T > values ) { if ( values . isEmpty ( ) ) throw new IllegalArgumentException ( "No median in an empty collection" ) ; List < T > sorted = new ArrayList < T > ( values ) ; Collections . sort ( sorted ) ; return sorted . get ( sorted . size ( ) / 2 ) ; }
|
Returns the median value of the collection of numbers
|
2,818
|
public static double median ( int [ ] values ) { if ( values . length == 0 ) throw new IllegalArgumentException ( "No median in an empty array" ) ; int [ ] sorted = Arrays . copyOf ( values , values . length ) ; Arrays . sort ( sorted ) ; return sorted [ sorted . length / 2 ] ; }
|
Returns the median value of the array of ints
|
2,819
|
public static < T extends Number > T mode ( Collection < T > values ) { if ( values . isEmpty ( ) ) throw new IllegalArgumentException ( "No mode in an empty collection" ) ; Counter < T > c = new ObjectCounter < T > ( ) ; for ( T n : values ) c . count ( n ) ; return c . max ( ) ; }
|
Returns the mode value of the collection of numbers
|
2,820
|
public static int mode ( int [ ] values ) { if ( values . length == 0 ) throw new IllegalArgumentException ( "No mode in an empty array" ) ; Counter < Integer > c = new ObjectCounter < Integer > ( ) ; for ( int i : values ) c . count ( i ) ; return c . max ( ) ; }
|
Returns the mode value of the array of ints
|
2,821
|
public static double mode ( double [ ] values ) { if ( values . length == 0 ) throw new IllegalArgumentException ( "No mode in an empty array" ) ; Counter < Double > c = new ObjectCounter < Double > ( ) ; for ( double d : values ) c . count ( d ) ; return c . max ( ) ; }
|
Returns the mode value of the array of doubles
|
2,822
|
public static double stddev ( Collection < ? extends Number > values ) { double mean = mean ( values ) ; double sum = 0d ; for ( Number n : values ) { double d = n . doubleValue ( ) - mean ; sum += d * d ; } return Math . sqrt ( sum / values . size ( ) ) ; }
|
Returns the standard deviation of the collection of numbers
|
2,823
|
public static double stddev ( int [ ] values ) { double mean = mean ( values ) ; double sum = 0d ; for ( int i : values ) { double d = i - mean ; sum += d * d ; } return Math . sqrt ( sum / values . length ) ; }
|
Returns the standard deviation of the values in the int array
|
2,824
|
public static double sum ( Collection < ? extends Number > values ) { double sum = 0d ; for ( Number n : values ) sum += n . doubleValue ( ) ; return sum ; }
|
Returns the sum of the collection of numbers
|
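A worked example of the statistics helpers in the rows above (a sketch; `Statistics` is an assumed name for their enclosing class). Two behaviors visible in the code are worth noting: stddev is the population form (dividing by n), and median of an even-sized input returns the upper of the two middle values rather than their average.

```java
import java.util.Arrays;
import java.util.List;

public class StatsDemo {
    public static void main(String[] args) {
        List<Integer> xs = Arrays.asList(1, 2, 2, 3, 10);
        System.out.println(Statistics.mean(xs));    // 3.6
        System.out.println(Statistics.median(xs));  // 2 (middle of sorted [1,2,2,3,10])
        System.out.println(Statistics.mode(xs));    // 2
        System.out.println(Statistics.stddev(xs));  // ~3.26, population form
        System.out.println(Statistics.sum(xs));     // 18.0
        System.out.println(Statistics.median(new int[] {4, 8})); // 8.0, upper middle
    }
}
```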
2,825
|
private < E extends Edge > double getConnectionSimilarity ( Graph < E > graph , Edge e1 , Edge e2 ) { int e1to = e1 . to ( ) ; int e1from = e1 . from ( ) ; int e2to = e2 . to ( ) ; int e2from = e2 . from ( ) ; if ( e1to == e2to ) return getConnectionSimilarity ( graph , e1to , e1from , e2from ) ; else if ( e1to == e2from ) return getConnectionSimilarity ( graph , e1to , e1from , e2to ) ; else if ( e1from == e2to ) return getConnectionSimilarity ( graph , e1from , e1to , e2from ) ; else if ( e1from == e2from ) return getConnectionSimilarity ( graph , e1from , e1to , e2to ) ; else return 0 ; }
|
Computes the connection similarity for the two edges , first calculating the impost and keystone nodes . If the edges are not connected returns 0 .
|
2,826
|
private void addIntermediateNode ( Node < V > original , int numOverlappingCharacters , String key , int indexOfStartOfOverlap , V value ) { char [ ] originalPrefix = original . prefix ; char distinguishing = originalPrefix [ numOverlappingCharacters ] ; char [ ] remainingPrefix = Arrays . copyOfRange ( originalPrefix , numOverlappingCharacters + 1 , originalPrefix . length ) ; char [ ] overlappingPrefix = Arrays . copyOfRange ( originalPrefix , 0 , numOverlappingCharacters ) ; Node < V > child = new Node < V > ( remainingPrefix , original . value ) ; child . children = original . children ; original . prefix = overlappingPrefix ; original . children = new CharMap < Node < V > > ( ) ; original . addChild ( distinguishing , child ) ; int remainingKeyChars = key . length ( ) - indexOfStartOfOverlap ; if ( numOverlappingCharacters == remainingKeyChars ) { original . value = value ; } else { int prefixStart = indexOfStartOfOverlap + numOverlappingCharacters + 1 ; char mappingKey = key . charAt ( indexOfStartOfOverlap + numOverlappingCharacters ) ; char [ ] remainingKey = new char [ key . length ( ) - prefixStart ] ; for ( int i = 0 ; i < remainingKey . length ; ++ i ) { remainingKey [ i ] = key . charAt ( prefixStart + i ) ; } Node < V > newMapping = new Node < V > ( remainingKey , value ) ; original . addChild ( mappingKey , newMapping ) ; original . value = null ; } }
|
Creates a series of children under the provided node moving the value that was mapped to this node to the appropriate terminal node in the series and finally creating a new node at the end to hold the new key - value mapping .
|
2,827
|
public < E extends Edge > double [ ] compute ( Graph < E > g ) { if ( ! hasContiguousVertices ( g ) ) throw new IllegalArgumentException ( "Vertices must be in contiguous order" ) ; double [ ] centralities = new double [ g . order ( ) ] ; IntIterator vertexIter = g . vertices ( ) . iterator ( ) ; while ( vertexIter . hasNext ( ) ) { int s = vertexIter . nextInt ( ) ; Deque < Integer > S = new ArrayDeque < Integer > ( ) ; List < List < Integer > > P = new ArrayList < List < Integer > > ( g . order ( ) ) ; for ( int i = 0 ; i < g . order ( ) ; ++ i ) P . add ( new ArrayList < Integer > ( ) ) ; double [ ] sigma = new double [ g . order ( ) ] ; sigma [ s ] = 1 ; double [ ] d = new double [ g . order ( ) ] ; Arrays . fill ( d , - 1 ) ; d [ s ] = 0 ; Queue < Integer > Q = new ArrayDeque < Integer > ( ) ; Q . add ( s ) ; while ( ! Q . isEmpty ( ) ) { int v = Q . poll ( ) ; S . offer ( v ) ; IntIterator neighborIter = g . getNeighbors ( v ) . iterator ( ) ; while ( neighborIter . hasNext ( ) ) { int w = neighborIter . nextInt ( ) ; if ( d [ w ] < 0 ) { Q . offer ( w ) ; d [ w ] = d [ v ] + 1 ; } if ( d [ w ] == d [ v ] + 1 ) { sigma [ w ] += sigma [ v ] ; P . get ( w ) . add ( v ) ; } } } double [ ] delta = new double [ g . order ( ) ] ; while ( ! S . isEmpty ( ) ) { int w = S . pollLast ( ) ; for ( int v : P . get ( w ) ) { delta [ v ] += ( sigma [ v ] / sigma [ w ] ) * ( 1 + delta [ w ] ) ; } if ( w != s ) { centralities [ w ] += delta [ w ] ; } } } return centralities ; }
|
Returns a mapping from each vertex to its betweenness centrality measure .
|
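The loop structure above (a BFS from every source $s$ followed by a reverse accumulation pass) is Brandes'-style betweenness computation. For each vertex $v$ it accumulates

$$ C_B(v) \;=\; \sum_{s \neq v \neq t} \frac{\sigma_{st}(v)}{\sigma_{st}} $$

where $\sigma_{st}$ is the number of shortest $s$-$t$ paths and $\sigma_{st}(v)$ the number of those passing through $v$; the delta update in the code implements the dependency recurrence $\delta_s(v) = \sum_{w \,:\, v \in P_s(w)} \frac{\sigma_{sv}}{\sigma_{sw}} \left(1 + \delta_s(w)\right)$.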
2,828
|
private Matrix getEdgeSimMatrix ( List < Edge > edgeList , SparseMatrix sm , boolean keepSimilarityMatrixInMemory ) { return ( keepSimilarityMatrixInMemory ) ? calculateEdgeSimMatrix ( edgeList , sm ) : new LazySimilarityMatrix ( edgeList , sm ) ; }
|
Returns the edge similarity matrix for the edges in the provided sparse matrix .
|
2,829
|
private Matrix calculateEdgeSimMatrix ( final List < Edge > edgeList , final SparseMatrix sm ) { final int numEdges = edgeList . size ( ) ; final Matrix edgeSimMatrix = new SparseSymmetricMatrix ( new SparseHashMatrix ( numEdges , numEdges ) ) ; Object key = workQueue . registerTaskGroup ( numEdges ) ; for ( int i = 0 ; i < numEdges ; ++ i ) { final int row = i ; workQueue . add ( key , new Runnable ( ) { public void run ( ) { for ( int j = row ; j < numEdges ; ++ j ) { Edge e1 = edgeList . get ( row ) ; Edge e2 = edgeList . get ( j ) ; double sim = getEdgeSimilarity ( sm , e1 , e2 ) ; if ( sim > 0 ) { edgeSimMatrix . set ( row , j , sim ) ; } } } } ) ; } workQueue . await ( key ) ; return edgeSimMatrix ; }
|
Calculates the similarity matrix for the edges . The similarity matrix is symmetric .
|
2,830
|
private static MultiMap < Integer , Integer > convertMergesToAssignments ( List < Merge > merges , int numOriginalClusters ) { MultiMap < Integer , Integer > clusterToElements = new HashMultiMap < Integer , Integer > ( ) ; for ( int i = 0 ; i < numOriginalClusters ; ++ i ) clusterToElements . put ( i , i ) ; for ( Merge m : merges ) { clusterToElements . putMany ( m . remainingCluster ( ) , clusterToElements . remove ( m . mergedCluster ( ) ) ) ; } return clusterToElements ; }
|
Converts a series of merges to cluster assignments . Cluster assignments are assumed to start at 0 .
|
2,831
|
private static int [ ] getImpostNeighbors ( SparseMatrix sm , int rowIndex ) { int [ ] impost1edges = sm . getRowVector ( rowIndex ) . getNonZeroIndices ( ) ; int [ ] neighbors = Arrays . copyOf ( impost1edges , impost1edges . length + 1 ) ; neighbors [ neighbors . length - 1 ] = rowIndex ; return neighbors ; }
|
Returns an array containing the row indices of the neighbors of the impost node and the row index of the impost node itself .
|
2,832
|
public double getSolutionDensity ( int solutionNum ) { if ( solutionNum < 0 || solutionNum >= mergeOrder . size ( ) ) { throw new IllegalArgumentException ( "not a valid solution: " + solutionNum ) ; } if ( mergeOrder == null || edgeList == null ) { throw new IllegalStateException ( "initial clustering solution is not valid yet" ) ; } int numEdges = edgeList . size ( ) ; List < Merge > mergeSteps = mergeOrder . subList ( 0 , solutionNum ) ; MultiMap < Integer , Integer > clusterToElements = convertMergesToAssignments ( mergeSteps , numEdges ) ; double partitionDensitySum = 0d ; for ( Integer cluster : clusterToElements . keySet ( ) ) { Set < Integer > linkPartition = clusterToElements . get ( cluster ) ; int numLinks = linkPartition . size ( ) ; BitSet nodesInPartition = new BitSet ( numRows ) ; for ( Integer linkIndex : linkPartition ) { Edge link = edgeList . get ( linkIndex ) ; nodesInPartition . set ( link . from ) ; nodesInPartition . set ( link . to ) ; } int numNodes = nodesInPartition . cardinality ( ) ; double partitionDensity = ( numLinks - ( numNodes - 1d ) ) / ( ( ( numNodes * ( numNodes - 1d ) ) / 2d ) - ( numLinks - 1 ) ) ; partitionDensitySum += partitionDensity ; } double partitionDensity = ( 2d / numEdges ) * partitionDensitySum ; return partitionDensity ; }
|
Returns the partition density of the clustering solution .
|
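For context, partition density traces to Ahn, Bagrow, and Lehmann's link-community work, where each community of $m_c$ links over $n_c$ nodes is scored between a tree and a clique. In the published form (notation mine) the per-community densities are weighted by $m_c$:

$$ D \;=\; \frac{2}{M} \sum_c m_c \, \frac{m_c - (n_c - 1)}{\frac{n_c (n_c - 1)}{2} - (n_c - 1)} $$

with $M$ the total number of links. The excerpt above sums unweighted per-community terms and subtracts $m_c - 1$ rather than $n_c - 1$ in the denominator, so treat this formula as the reference definition rather than a transcription of the code.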
2,833
|
public Assignments getSolution ( int solutionNum ) { if ( solutionNum < 0 || solutionNum >= mergeOrder . size ( ) ) { throw new IllegalArgumentException ( "not a valid solution: " + solutionNum ) ; } if ( mergeOrder == null || edgeList == null ) { throw new IllegalStateException ( "initial clustering solution is not valid yet" ) ; } int numEdges = edgeList . size ( ) ; MultiMap < Integer , Integer > bestEdgeAssignment = convertMergesToAssignments ( mergeOrder . subList ( 0 , solutionNum ) , numEdges ) ; List < Set < Integer > > nodeClusters = new ArrayList < Set < Integer > > ( numRows ) ; for ( int i = 0 ; i < numRows ; ++ i ) nodeClusters . add ( new HashSet < Integer > ( ) ) ; int clusterId = 0 ; for ( Integer cluster : bestEdgeAssignment . keySet ( ) ) { Set < Integer > edgePartition = bestEdgeAssignment . get ( cluster ) ; for ( Integer edgeId : edgePartition ) { Edge e = edgeList . get ( edgeId ) ; nodeClusters . get ( e . from ) . add ( clusterId ) ; nodeClusters . get ( e . to ) . add ( clusterId ) ; } clusterId ++ ; } Assignment [ ] nodeAssignments = new Assignment [ numRows ] ; for ( int i = 0 ; i < nodeAssignments . length ; ++ i ) nodeAssignments [ i ] = new SoftAssignment ( nodeClusters . get ( i ) ) ; return new Assignments ( clusterId , nodeAssignments ) ; }
|
Returns the clustering solution after the specified number of merge steps .
|
2,834
|
public static Matrix average ( Matrix m , Dimension dim ) { Matrix averageMatrix = null ; if ( dim == Dimension . ALL ) { double average = 0 ; for ( int i = 0 ; i < m . rows ( ) ; ++ i ) { for ( int j = 0 ; j < m . columns ( ) ; ++ j ) average += m . get ( i , j ) ; } averageMatrix = new ArrayMatrix ( 1 , 1 ) ; average = average / ( double ) ( m . rows ( ) * m . columns ( ) ) ; averageMatrix . set ( 0 , 0 , average ) ; } else if ( dim == Dimension . ROW ) { averageMatrix = new ArrayMatrix ( m . rows ( ) , 1 ) ; for ( int i = 0 ; i < m . rows ( ) ; ++ i ) { double average = 0 ; for ( int j = 0 ; j < m . columns ( ) ; ++ j ) average += m . get ( i , j ) ; average = average / ( double ) m . columns ( ) ; averageMatrix . set ( i , 0 , average ) ; } } else if ( dim == Dimension . COLUMN ) { averageMatrix = new ArrayMatrix ( 1 , m . columns ( ) ) ; for ( int i = 0 ; i < m . rows ( ) ; ++ i ) { for ( int j = 0 ; j < m . columns ( ) ; ++ j ) { double newValue = m . get ( i , j ) + averageMatrix . get ( 0 , j ) ; averageMatrix . set ( 0 , j , newValue ) ; } } for ( int i = 0 ; i < m . columns ( ) ; ++ i ) { double average = averageMatrix . get ( 0 , i ) ; average = average / ( double ) m . rows ( ) ; averageMatrix . set ( 0 , i , average ) ; } } return averageMatrix ; }
|
Return a matrix containing the averages for the dimension specified .
|
2,835
|
public void processFile ( File blogFile ) throws IOException { BufferedReader br = new BufferedReader ( new FileReader ( blogFile ) ) ; String line = null ; String date = null ; String id = null ; StringBuilder content = new StringBuilder ( ) ; boolean needMoreContent = false ; while ( ( line = br . readLine ( ) ) != null ) { if ( line . contains ( "<id>" ) ) { int startIndex = line . indexOf ( ">" ) + 1 ; int endIndex = line . lastIndexOf ( "<" ) ; id = line . substring ( startIndex , endIndex ) ; } else if ( line . contains ( "<content>" ) ) { int startIndex = line . indexOf ( ">" ) + 1 ; int endIndex = line . lastIndexOf ( "<" ) ; content = new StringBuilder ( ) ; if ( endIndex > startIndex ) content . append ( line . substring ( startIndex , endIndex ) ) ; else { content . append ( line . substring ( startIndex ) ) ; needMoreContent = true ; } } else if ( needMoreContent ) { int endIndex = ( line . contains ( "</content>" ) ) ? line . lastIndexOf ( "<" ) : - 1 ; if ( endIndex > 0 ) { content . append ( line . substring ( 0 , endIndex ) ) ; needMoreContent = false ; } else content . append ( line ) ; } else if ( line . contains ( "<updated>" ) ) { int startIndex = line . indexOf ( ">" ) + 1 ; int endIndex = line . lastIndexOf ( "<" ) ; date = line . substring ( startIndex , endIndex ) ; if ( date . equals ( "" ) ) date = null ; } else if ( content != null && date != null ) { long dateTime = Timestamp . valueOf ( date ) . getTime ( ) ; if ( dateTime < beginTime || dateTime > endTime ) { needMoreContent = false ; date = null ; continue ; } String cleanedContent = processor . process ( content . toString ( ) ) ; if ( ! cleanedContent . equals ( "" ) ) { synchronized ( pw ) { pw . format ( "%d %s\n" , dateTime , cleanedContent ) ; pw . flush ( ) ; } } LOGGER . info ( String . format ( "Processed blog %s with timestamp %d" , id , dateTime ) ) ; needMoreContent = false ; date = null ; } } br . close ( ) ; }
|
Given a blog file read through each line and extract the content and updated date printing these as one line to the result file .
|
2,836
|
private Collection < Multigraph < T , E > > enumerateSimpleGraphs ( Multigraph < T , E > input , List < IntPair > connected , int curPair , Multigraph < T , E > toCopy ) { List < Multigraph < T , E > > simpleGraphs = new LinkedList < Multigraph < T , E > > ( ) ; IntPair p = connected . get ( curPair ) ; Set < E > edges = input . getEdges ( p . x , p . y ) ; for ( E e : edges ) { Multigraph < T , E > m = toCopy . copy ( toCopy . vertices ( ) ) ; m . add ( e ) ; if ( curPair + 1 < connected . size ( ) ) { simpleGraphs . addAll ( enumerateSimpleGraphs ( input , connected , curPair + 1 , m ) ) ; } else { simpleGraphs . add ( m ) ; } } return simpleGraphs ; }
|
Recursively enumerates the parallel edge permutations of the input graph building up the graphs and returning the entire set of graphs .
|
2,837
|
public Multigraph < T , E > next ( ) { if ( ! hasNext ( ) ) throw new NoSuchElementException ( ) ; Multigraph < T , E > cur = next . poll ( ) ; if ( next . isEmpty ( ) ) advance ( ) ; return cur ; }
|
Returns the next simple graph from the multigraph .
|
2,838
|
private void addRelation ( String object , String attribute ) { double val ; int row , col ; object = object . toLowerCase ( ) ; attribute = attribute . toLowerCase ( ) ; if ( objectTable . containsKey ( object ) ) { row = objectTable . get ( object ) ; } else { row = Integer . valueOf ( objectCounter . getAndIncrement ( ) ) ; objectTable . put ( object , row ) ; System . out . println ( object + " " + row ) ; } if ( attributeTable . containsKey ( attribute ) ) { col = attributeTable . get ( attribute ) ; } else { col = Integer . valueOf ( attributeCounter . getAndIncrement ( ) ) ; attributeTable . put ( attribute , col ) ; } if ( row < syntacticCooccurrence . rows ( ) && col < syntacticCooccurrence . columns ( ) ) { val = syntacticCooccurrence . get ( row , col ) ; syntacticCooccurrence . set ( row , col , val + 1 ) ; } else { syntacticCooccurrence . set ( row , col , 1.0 ) ; } }
|
Adds a relation pair to the matrix
|
2,839
|
private boolean inStartSet ( String tag ) { return tag . startsWith ( "NN" ) || tag . startsWith ( "JJ" ) || tag . startsWith ( "RB" ) || tag . startsWith ( "CD" ) ; }
|
Checks to see if the tag can modify another word
|
2,840
|
private boolean isPhraseOrClause ( String tag ) { return ( ! tag . equals ( "SYM" ) && tag . startsWith ( "S" ) ) || tag . equals ( "ADJP" ) || tag . equals ( "ADVP" ) || tag . equals ( "CONJP" ) || tag . equals ( "FRAG" ) || tag . equals ( "INTJ" ) || tag . equals ( "LST" ) || tag . equals ( "NAC" ) || tag . equals ( "NP" ) || tag . equals ( "NX" ) || tag . equals ( "PP" ) || tag . equals ( "PRN" ) || tag . equals ( "PRT" ) || tag . equals ( "QP" ) || tag . equals ( "RRC" ) || tag . equals ( "UCP" ) || tag . equals ( "VP" ) || tag . startsWith ( "WH" ) || tag . equals ( "X" ) ; }
|
Checks to see if tag marks a phrase or clause
|
2,841
|
private String getNextTag ( String str ) { String tag ; int endIndex ; int tagIndex = str . indexOf ( "(" ) ; if ( tagIndex < 0 ) { return null ; } endIndex = str . indexOf ( " " , tagIndex ) ; if ( endIndex < 0 ) { return null ; } tag = str . substring ( tagIndex + 1 , endIndex ) ; if ( tag . length ( ) > 0 ) { return tag ; } else { str = str . substring ( tagIndex + 1 ) ; return getNextTag ( str ) ; } }
|
Returns the next tag in the sentence or null if there are no more tags
|
2,842
|
public double [ ] vectorize ( List < String > phonemes ) { int nextConsonantIndex = 0 ; int nextVowelIndex = 0 ; double [ ] result = new double [ ( vowelIndices . length + consonantIndices . length ) * 3 ] ; for ( String phoneme : phonemes ) { int offset = 3 ; if ( VOWELS . contains ( phoneme ) ) offset *= vowelIndices [ nextVowelIndex ++ ] ; else offset *= consonantIndices [ nextConsonantIndex ++ ] ; double [ ] values = PHONEME_VALUES . get ( phoneme ) ; for ( int i = 0 ; i < 3 ; ++ i ) result [ i + offset ] = values [ i ] ; } return result ; }
|
Returns a left - justified syllabic template representation of the given list of phonemes . Every three values correspond to a single phoneme representation . If six syllables are used a vector of 99 values is returned otherwise a vector of 54 values is returned .
|
2,843
|
public void process ( Iterator < String > text ) { String nextToken = null , curToken = null ; if ( text . hasNext ( ) ) nextToken = text . next ( ) ; while ( text . hasNext ( ) ) { curToken = nextToken ; nextToken = text . next ( ) ; if ( ! ( excludeToken ( curToken ) || excludeToken ( nextToken ) ) ) processBigram ( curToken , nextToken ) ; } }
|
Processes the tokens in the iterator to gather statistics for any bigrams contained therein
|
2,844
|
private void processBigram ( String left , String right ) { TokenStats leftStats = getStatsFor ( left ) ; TokenStats rightStats = getStatsFor ( right ) ; leftStats . count ++ ; rightStats . count ++ ; leftStats . leftCount ++ ; rightStats . rightCount ++ ; numBigramsInCorpus ++ ; long bigram = ( ( ( long ) leftStats . index ) << 32 ) | rightStats . index ; Number curBigramCount = bigramCounts . get ( bigram ) ; int i = ( curBigramCount == null ) ? 1 : 1 + curBigramCount . intValue ( ) ; Number val = null ; if ( i < Byte . MAX_VALUE ) val = Byte . valueOf ( ( byte ) i ) ; else if ( i < Short . MAX_VALUE ) val = Short . valueOf ( ( short ) i ) ; else val = Integer . valueOf ( i ) ; bigramCounts . put ( bigram , val ) ; }
|
Updates the statistics for the bigram formed from the provided left and right token .
|
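The long key above packs the two token indices into one 64-bit value. A minimal sketch of the packing and of the unpacking later performed by printBigrams (the explicit mask on the low half is harmless to add here because token indices are non-negative):

```java
public class BigramKeyDemo {
    public static void main(String[] args) {
        int leftIndex = 7, rightIndex = 42;

        // Pack: left index in the high 32 bits, right index in the low 32.
        long bigram = (((long) leftIndex) << 32) | (rightIndex & 0xFFFFFFFFL);

        // Unpack, exactly as printBigrams does further down this section.
        int first = (int) (bigram >>> 32);         // 7
        int second = (int) (bigram & 0xFFFFFFFFL); // 42
        System.out.println(first + " " + second);
    }
}
```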
2,845
|
public void printBigrams ( PrintWriter output , SignificanceTest test , int minOccurrencePerToken ) { String [ ] indexToToken = new String [ tokenCounts . size ( ) ] ; for ( Map . Entry < String , TokenStats > e : tokenCounts . entrySet ( ) ) indexToToken [ e . getValue ( ) . index ] = e . getKey ( ) . toString ( ) ; LOGGER . info ( "Number of bigrams: " + bigramCounts . size ( ) ) ; for ( Map . Entry < Long , Number > e : bigramCounts . entrySet ( ) ) { long bigram = e . getKey ( ) . longValue ( ) ; int firstTokenIndex = ( int ) ( bigram >>> 32 ) ; int secondTokenIndex = ( int ) ( bigram & 0xFFFFFFFFL ) ; int bigramCount = e . getValue ( ) . intValue ( ) ; TokenStats t1 = tokenCounts . get ( indexToToken [ firstTokenIndex ] ) ; TokenStats t2 = tokenCounts . get ( indexToToken [ secondTokenIndex ] ) ; if ( t1 . count < minOccurrencePerToken || t2 . count < minOccurrencePerToken ) continue ; int [ ] contingencyTable = getContingencyTable ( t1 , t2 , bigramCount ) ; double score = getScore ( contingencyTable , test ) ; output . println ( score + " " + indexToToken [ firstTokenIndex ] + " " + indexToToken [ secondTokenIndex ] ) ; } }
|
Prints all of the known bigrams where each token in the bigram must occur at least the specified number of times .
|
2,846
|
private double getScore ( int [ ] contingencyTable , SignificanceTest test ) { switch ( test ) { case PMI : return pmi ( contingencyTable ) ; case CHI_SQUARED : return chiSq ( contingencyTable ) ; case LOG_LIKELIHOOD : return logLikelihood ( contingencyTable ) ; default : throw new Error ( test + " not implemented yet" ) ; } }
|
Returns the score of the contingency table using the specified significance test
|
2,847
|
private double logLikelihood ( int [ ] contingencyTable ) { int [ ] t = contingencyTable ; int col1sum = t [ 0 ] + t [ 2 ] ; int col2sum = t [ 1 ] + t [ 3 ] ; int row1sum = t [ 0 ] + t [ 1 ] ; int row2sum = t [ 2 ] + t [ 3 ] ; double sum = row1sum + row2sum ; double aExp = ( row1sum / sum ) * col1sum ; double bExp = ( row1sum / sum ) * col2sum ; double cExp = ( row2sum / sum ) * col1sum ; double dExp = ( row2sum / sum ) * col2sum ; double aVal = ( t [ 0 ] == 0 ) ? 0 : t [ 0 ] * Math . log ( t [ 0 ] / aExp ) ; double bVal = ( t [ 1 ] == 0 ) ? 0 : t [ 1 ] * Math . log ( t [ 1 ] / bExp ) ; double cVal = ( t [ 2 ] == 0 ) ? 0 : t [ 2 ] * Math . log ( t [ 2 ] / cExp ) ; double dVal = ( t [ 3 ] == 0 ) ? 0 : t [ 3 ] * Math . log ( t [ 3 ] / dExp ) ; return 2 * ( aVal + bVal + cVal + dVal ) ; }
|
Returns the log - likelihood score of the contingency table
|
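For reference, both logLikelihood variants in this section compute the $G^2$ statistic over the four cells of a 2×2 contingency table, with expected counts taken from the marginals (notation mine):

$$ G^2 \;=\; 2 \sum_{i=1}^{4} O_i \ln \frac{O_i}{E_i}, \qquad E_i = \frac{(\text{row sum for } i)\,(\text{column sum for } i)}{N} $$

where $O_i$ are the observed cell counts and $N$ is the table total; cells with $O_i = 0$ contribute 0, as the four-argument variant later in the section also ensures.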
2,848
|
public int getDimension ( DependencyPath path ) { String endToken = path . last ( ) . word ( ) ; String relation = path . getRelation ( path . length ( ) - 1 ) ; return getDimensionInternal ( endToken + "+" + relation ) ; }
|
Returns the dimension number corresponding to the term at the end of the provided path .
|
2,849
|
public synchronized DoubleVector generate ( ) { DoubleVector termVector = new DenseVector ( indexVectorLength ) ; for ( int i = 0 ; i < indexVectorLength ; i ++ ) termVector . set ( i , mean + ( randomGenerator . nextGaussian ( ) * stdev ) ) ; return termVector ; }
|
Generate a new random vector using a Gaussian distribution for each value .
|
2,850
|
protected void addContextTerms ( SparseDoubleVector meaning , Queue < String > words , int distance ) { for ( String term : words ) { if ( ! term . equals ( IteratorFactory . EMPTY_TOKEN ) ) { int dimension = basis . getDimension ( term ) ; if ( dimension == - 1 ) continue ; meaning . set ( dimension , weighting . weight ( distance , windowSize ) ) ; ++ distance ; } } }
|
Adds a feature for each word in the context that has a valid dimension . Features are scored based on the context word's distance from the focus word .
|
2,851
|
@ SuppressWarnings ( "unchecked" ) private void processSpace ( ) throws IOException { compressedDocumentsWriter . close ( ) ; String [ ] indexToTerm = new String [ termToIndex . size ( ) ] ; for ( Map . Entry < String , Integer > e : termToIndex . entrySet ( ) ) indexToTerm [ e . getValue ( ) ] = e . getKey ( ) ; int corpusSize = 0 ; for ( AtomicInteger i : termCounts ) corpusSize += i . get ( ) ; final int uniqueTerms = cooccurrenceMatrix . rows ( ) ; LOGGER . info ( "calculating term features" ) ; final BitSet [ ] termFeatures = new BitSet [ wordIndexCounter ] ; for ( int termIndex = 0 ; termIndex < uniqueTerms ; ++ termIndex ) { String term = indexToTerm [ termIndex ] ; termFeatures [ termIndex ] = calculateTermFeatures ( term , corpusSize ) ; } LOGGER . info ( "reprocessing corpus to generate feature vectors" ) ; final BlockingQueue < Runnable > workQueue = new LinkedBlockingQueue < Runnable > ( ) ; for ( int i = 0 ; i < Runtime . getRuntime ( ) . availableProcessors ( ) ; ++ i ) { Thread t = new WorkerThread ( workQueue ) ; t . start ( ) ; } final Semaphore termsProcessed = new Semaphore ( 0 ) ; for ( int termIndex = 0 ; termIndex < uniqueTerms ; ++ termIndex ) { final String term = indexToTerm [ termIndex ] ; final int i = termIndex ; workQueue . offer ( new Runnable ( ) { public void run ( ) { try { LOGGER . fine ( String . format ( "processing term %6d/%d: %s" , i , uniqueTerms , term ) ) ; Matrix contexts = getTermContexts ( i , termFeatures [ i ] ) ; senseInduce ( term , contexts ) ; } catch ( IOException ioe ) { ioe . printStackTrace ( ) ; } finally { termsProcessed . release ( ) ; } } } ) ; } try { termsProcessed . acquire ( uniqueTerms ) ; } catch ( InterruptedException ie ) { throw new Error ( "interrupted while waiting for terms to " + "finish reprocessing" , ie ) ; } LOGGER . info ( "finished reprocessing all terms" ) ; }
|
Calculates the first order co - occurrence statistics to determine the feature set for each term then clusters the feature vectors for each term's contexts and finally induces the sense - specific vectors for each term .
|
2,852
|
private void senseInduce ( String term , Matrix contexts ) throws IOException { LOGGER . fine ( "Clustering " + contexts . rows ( ) + " contexts for " + term ) ; int numClusters = Math . min ( 7 , contexts . rows ( ) ) ; if ( ! ( term . matches ( "[a-zA-Z]+" ) && numClusters > 6 ) ) { SparseDoubleVector meanSenseVector = new CompactSparseVector ( termToIndex . size ( ) ) ; int rows = contexts . rows ( ) ; for ( int row = 0 ; row < rows ; ++ row ) VectorMath . add ( meanSenseVector , contexts . getRowVector ( row ) ) ; termToVector . put ( term , meanSenseVector ) ; return ; } Assignments clusterAssignment = new ClutoClustering ( ) . cluster ( contexts , numClusters , ClutoClustering . Method . AGGLOMERATIVE , ClutoClustering . Criterion . UPGMA ) ; LOGGER . fine ( "Generating sense vectors for " + term ) ; int [ ] clusterSize = new int [ numClusters ] ; SparseDoubleVector [ ] meanSenseVectors = new CompactSparseVector [ numClusters ] ; for ( int i = 0 ; i < meanSenseVectors . length ; ++ i ) meanSenseVectors [ i ] = new CompactSparseVector ( termToIndex . size ( ) ) ; for ( int row = 0 ; row < clusterAssignment . size ( ) ; ++ row ) { if ( clusterAssignment . get ( row ) . assignments ( ) . length == 0 ) continue ; int assignment = clusterAssignment . get ( row ) . assignments ( ) [ 0 ] ; clusterSize [ assignment ] ++ ; DoubleVector contextVector = contexts . getRowVector ( row ) ; VectorMath . add ( meanSenseVectors [ assignment ] , contextVector ) ; } int senseCounter = 0 ; for ( int i = 0 ; i < numClusters ; ++ i ) { int size = clusterSize [ i ] ; if ( size / ( double ) ( contexts . rows ( ) ) > 0.02 ) { String termWithSense = ( senseCounter == 0 ) ? term : term + "-" + senseCounter ; senseCounter ++ ; termToVector . put ( termWithSense , meanSenseVectors [ i ] ) ; } } LOGGER . fine ( "Discovered " + senseCounter + " senses for " + term ) ; }
|
Given a matrix for the term where each row is a different context clusters the rows to identify how many senses the word has .
|
2,853
|
private int processIntDocument ( int termIndex , int [ ] document , Matrix contextMatrix , int rowStart , BitSet featuresForTerm ) { int contexts = 0 ; for ( int i = 0 ; i < document . length ; ++ i ) { int curToken = document [ i ] ; if ( curToken != termIndex ) continue ; SparseArray < Integer > contextCounts = new SparseHashArray < Integer > ( ) ; for ( int left = Math . max ( i - contextWindowSize , 0 ) ; left < i ; ++ left ) { int token = document [ left ] ; if ( token >= 0 && featuresForTerm . get ( token ) ) { Integer count = contextCounts . get ( token ) ; contextCounts . set ( token , ( count == null ) ? 1 : count + 1 ) ; } } int end = Math . min ( i + contextWindowSize , document . length ) ; for ( int right = i + 1 ; right < end ; ++ right ) { int token = document [ right ] ; if ( token >= 0 && featuresForTerm . get ( token ) ) { Integer count = contextCounts . get ( token ) ; contextCounts . set ( token , ( count == null ) ? 1 : count + 1 ) ; } } int curContext = rowStart + contexts ; for ( int feat : contextCounts . getElementIndices ( ) ) { contextMatrix . set ( curContext , feat , contextCounts . get ( feat ) ) ; } contexts ++ ; } return contexts ; }
|
Processes the compressed version of a document where each integer indicates that token's index and identifies all the contexts for the target word adding them as new rows to the context matrix .
|
2,854
|
private static double logLikelihood ( double a , double b , double c , double d ) { double col1sum = a + c ; double col2sum = b + d ; double row1sum = a + b ; double row2sum = c + d ; double sum = row1sum + row2sum ; double aExp = ( row1sum / sum ) * col1sum ; double bExp = ( row1sum / sum ) * col2sum ; double cExp = ( row2sum / sum ) * col1sum ; double dExp = ( row2sum / sum ) * col2sum ; double aVal = ( a == 0 ) ? 0 : a * Math . log ( a / aExp ) ; double bVal = ( b == 0 ) ? 0 : b * Math . log ( b / bExp ) ; double cVal = ( c == 0 ) ? 0 : c * Math . log ( c / cExp ) ; double dVal = ( d == 0 ) ? 0 : d * Math . log ( d / dExp ) ; return 2 * ( aVal + bVal + cVal + dVal ) ; }
|
Returns the log - likelihood of the contingency table made up of the four values .
|
2,855
|
private void checkIndices ( int row , int col ) { if ( row < 0 || row >= rows ) throw new ArrayIndexOutOfBoundsException ( "row: " + row ) ; else if ( col < 0 || col >= cols ) throw new ArrayIndexOutOfBoundsException ( "column: " + col ) ; }
|
Check that the indices of a requested cell are within bounds .
|
2,856
|
public boolean add ( WeightedEdge e ) { int toAdd = - 1 ; if ( e . from ( ) == rootVertex ) toAdd = e . to ( ) ; else if ( e . to ( ) == rootVertex ) toAdd = e . from ( ) ; else { return false ; } double w = e . weight ( ) ; if ( edges . containsKey ( toAdd ) ) { double w2 = edges . put ( toAdd , w ) ; return false ; } else { edges . put ( toAdd , w ) ; return true ; } }
|
Adds the edge to this set if one of the vertices is the root vertex .
|
2,857
|
public DoubleVector centerOfMass ( ) { if ( centroid == null ) { if ( indices . size ( ) == 1 ) centroid = sumVector ; else { int length = sumVector . length ( ) ; double d = 1d / indices . size ( ) ; if ( sumVector instanceof SparseVector ) { centroid = new SparseHashDoubleVector ( length ) ; SparseVector sv = ( SparseVector ) sumVector ; for ( int nz : sv . getNonZeroIndices ( ) ) centroid . set ( nz , sumVector . get ( nz ) * d ) ; } else { centroid = new DenseVector ( length ) ; for ( int i = 0 ; i < length ; ++ i ) centroid . set ( i , sumVector . get ( i ) * d ) ; } } } return centroid ; }
|
Returns the average data point assigned to this candidate cluster
|
2,858
|
public void add ( int index , DoubleVector v ) { boolean added = indices . add ( index ) ; assert added : "Adding duplicate indices to candidate facility" ; if ( sumVector == null ) { sumVector = ( v instanceof SparseVector ) ? new SparseHashDoubleVector ( v ) : new DenseVector ( v ) ; } else { VectorMath . add ( sumVector , v ) ; centroid = null ; } }
|
Adds the data point with the specified index to the facility
|
2,859
|
public void merge ( CandidateCluster other ) { indices . addAll ( other . indices ) ; VectorMath . add ( sumVector , other . sumVector ) ; centroid = null ; }
|
Merges the elements assigned to the other cluster into this one .
|
2,860
|
private void printSpace ( SemanticSpace sspace , String tag ) { try { String EXT = ".sspace" ; File output = ( overwrite ) ? new File ( outputDir , sspace . getSpaceName ( ) + tag + EXT ) : File . createTempFile ( sspace . getSpaceName ( ) + tag , EXT , outputDir ) ; long startTime = System . currentTimeMillis ( ) ; SemanticSpaceIO . save ( sspace , output , format ) ; long endTime = System . currentTimeMillis ( ) ; verbose ( "printed space in %.3f seconds%n" , ( ( endTime - startTime ) / 1000d ) ) ; } catch ( IOException ioe ) { throw new IOError ( ioe ) ; } }
|
Prints the semantic space to file inserting the tag into the .sspace file name
|
2,861
|
private void updateTemporalSemantics ( long currentSemanticPartitionStartTime , SemanticSpace semanticPartition ) { double [ ] zeroVector = new double [ semanticPartition . getVectorLength ( ) ] ; for ( String word : interestingWords ) { SortedMap < Long , double [ ] > temporalSemantics = wordToTemporalSemantics . get ( word ) ; Vector v = semanticPartition . getVector ( word ) ; double [ ] semantics = ( v == null ) ? zeroVector : Vectors . asDouble ( v ) . toArray ( ) ; temporalSemantics . put ( currentSemanticPartitionStartTime , semantics ) ; } }
|
Adds the temporal semantics for each interesting word using the provided semantic partition .
|
2,862
|
private void printShiftRankings ( String dateString , long startOfMostRecentPartition , TimeSpan partitionDuration ) throws IOException { SortedMultiMap < Double , String > shiftToWord = new TreeMultiMap < Double , String > ( ) ; TimeSpan twoPartitions = new TimeSpan ( partitionDuration . getYears ( ) * 2 , partitionDuration . getMonths ( ) * 2 , partitionDuration . getWeeks ( ) * 2 , partitionDuration . getDays ( ) * 2 , partitionDuration . getHours ( ) * 2 ) ; for ( Map . Entry < String , SortedMap < Long , double [ ] > > e : wordToTemporalSemantics . entrySet ( ) ) { String word = e . getKey ( ) ; SortedMap < Long , double [ ] > m = e . getValue ( ) ; if ( m . size ( ) < 2 ) continue ; NavigableMap < Long , double [ ] > timestampToVector = ( m instanceof NavigableMap ) ? ( NavigableMap < Long , double [ ] > ) m : new TreeMap < Long , double [ ] > ( m ) ; Map . Entry < Long , double [ ] > mostRecent = timestampToVector . lastEntry ( ) ; if ( ! mostRecent . getKey ( ) . equals ( startOfMostRecentPartition ) ) continue ; Map . Entry < Long , double [ ] > secondMostRecent = timestampToVector . lowerEntry ( mostRecent . getKey ( ) ) ; if ( ! twoPartitions . insideRange ( secondMostRecent . getKey ( ) , mostRecent . getKey ( ) ) ) continue ; shiftToWord . put ( Similarity . cosineSimilarity ( secondMostRecent . getValue ( ) , mostRecent . getValue ( ) ) , word ) ; } PrintWriter pw = new PrintWriter ( new File ( outputDir , "shift-ranks-for." + dateString + ".txt" ) ) ; for ( Map . Entry < Double , String > e : shiftToWord . entrySet ( ) ) { pw . println ( e . getKey ( ) + "\t" + e . getValue ( ) ) ; } pw . close ( ) ; }
|
Computes the ranking of which words underwent the most dramatic shifts in the most recent partition and then prints the ranked list to a file .
|
2,863
|
protected void usage ( ) { System . out . println ( "usage: java FixedDurationTemporalRandomIndexingMain [options] " + "<output-dir>\n\n" + argOptions . prettyPrint ( ) + "\nFixed-Duration TRI provides four main output options:\n\n" + " 1) Outputting each semantic partition as a separate .sspace file. " + "Each file\n is named using the yyyy_MM_ww_dd_hh format to " + "indicate it start date.\n This is the most expensive of the " + "operations due to I/O overhead.\n\n" + " The remaining options require the use of the -I " + "--interestingTokenList option to\n specify a set of word for use" + " in tracking temporal changes.\n\n 2) For each of the interesting" + "words, -P, --printInterestingTokenShifts will track\n" + " the semantics" + " through time and report the semantic shift along with other\n" + " distance statistics.\n\n" + " 3) For each of the interesting words, -N, " + "--printInterestingTokenNeighbors\n will print the nearest " + "neighbor for each in the semantic space. The\n number " + "of neighbors to print should be specified.\n\n" + " 4) For each of the interesting words, generate the list of " + "similar\n neighbors using the --printInterestingTokenNeighbors" + " and then compare\n those neighbors with each other using " + "the\n --printInterestingTokenNeighborComparison option. " + "This creates a file\n with the pair-wise cosine similarities " + "for all neighbors. Note that this\n option requires both " + "flags to be specified.\n\n" + "Semantic filters limit the set of tokens for which the " + "semantics are kept.\nThis limits the potential memory overhead " + "for calculating semantics for a\nlarge set of words." + "\n\n" + OptionDescriptions . COMPOUND_WORDS_DESCRIPTION + "\n\n" + OptionDescriptions . TOKEN_FILTER_DESCRIPTION + "\n\n" + OptionDescriptions . FILE_FORMAT_DESCRIPTION + "\n\n" + OptionDescriptions . HELP_DESCRIPTION ) ; }
|
Prints the instructions on how to execute this program to standard out .
|
2,864
|
public Iterator < T > iterator ( ) { List < Iterator < T > > iters = new ArrayList < Iterator < T > > ( sets . size ( ) ) ; for ( Set < T > s : sets ) iters . add ( s . iterator ( ) ) ; return new CombinedIterator < T > ( iters ) ; }
|
Returns an iterator over all the unique items across all sets .
|
2,865
|
public int size ( ) { int size = 0 ; for ( Set < T > s : sets ) size += s . size ( ) ; return size ; }
|
Returns the number of unique items across all sets .
|
2,866
|
public void readFields ( DataInput in ) throws IOException { t . readFields ( in ) ; position = in . readInt ( ) ; }
|
Deserializes the internal data from the provided stream .
|
2,867
|
public void write ( DataOutput out ) throws IOException { t . write ( out ) ; out . writeInt ( position ) ; }
|
Serializes the internal data to the provided stream .
|
2,868
|
private void normalize ( DoubleVector v ) { double magnitude = 0 ; for ( int i = 0 ; i < v . length ( ) ; ++ i ) magnitude += Math . pow ( v . get ( i ) , 2 ) ; if ( magnitude == 0 ) return ; magnitude = Math . sqrt ( magnitude ) ; for ( int i = 0 ; i < v . length ( ) ; ++ i ) v . set ( i , v . get ( i ) / magnitude ) ; }
|
Performs l2 - normalization on the vector in place . If the magnitude of the vector is 0 the values are left unchanged .
|
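In symbols, the in-place update above is

$$ v_i \leftarrow \frac{v_i}{\lVert v \rVert_2}, \qquad \lVert v \rVert_2 = \sqrt{\sum_i v_i^2}, $$

with the vector left untouched when $\lVert v \rVert_2 = 0$.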
2,869
|
private DoubleVector groupConvolution ( Queue < String > prevWords , Queue < String > nextWords ) { DoubleVector result = new DenseVector ( indexVectorSize ) ; String prevWord = prevWords . peek ( ) ; DoubleVector tempConvolution ; if ( ! prevWord . equals ( IteratorFactory . EMPTY_TOKEN ) ) { tempConvolution = convolute ( vectorMap . get ( prevWords . peek ( ) ) , placeHolder ) ; VectorMath . add ( result , tempConvolution ) ; } else tempConvolution = placeHolder ; for ( String term : nextWords ) { if ( term . equals ( IteratorFactory . EMPTY_TOKEN ) ) continue ; tempConvolution = convolute ( tempConvolution , vectorMap . get ( term ) ) ; VectorMath . add ( result , tempConvolution ) ; } tempConvolution = placeHolder ; for ( String term : nextWords ) { if ( term . equals ( IteratorFactory . EMPTY_TOKEN ) ) continue ; tempConvolution = convolute ( tempConvolution , vectorMap . get ( term ) ) ; VectorMath . add ( result , tempConvolution ) ; } return result ; }
|
Generate the circular convolution of n - grams composed of words in the given context . The result of this convolution is returned as a DoubleVector .
|
2,870
|
protected void setup ( Mapper . Context context ) { Configuration conf = context . getConfiguration ( ) ; extractor = new CooccurrenceExtractor ( conf ) ; Properties props = new Properties ( ) ; for ( String property : ITERATOR_FACTORY_PROPERTIES ) { String propVal = conf . get ( property ) ; if ( propVal != null ) props . setProperty ( property , propVal ) ; } ResourceFinder hadoopRf = null ; try { hadoopRf = new HadoopResourceFinder ( conf ) ; } catch ( IOException ioe ) { throw new IOError ( ioe ) ; } IteratorFactory . setResourceFinder ( hadoopRf ) ; IteratorFactory . setProperties ( props ) ; }
|
Initializes all the properties for this particular mapper . This process includes setting up the window size and configuring how the input documents will be tokenized .
|
2,871
|
public DependencyPath next ( ) { if ( next == null ) throw new NoSuchElementException ( "No further paths to return" ) ; DependencyPath p = next ; advance ( ) ; return p ; }
|
Returns the next path that meets the requirements .
|
2,872
|
public long getPrimitive ( int index ) { if ( index < 0 || index >= maxLength ) { throw new ArrayIndexOutOfBoundsException ( "invalid index: " + index ) ; } int pos = Arrays . binarySearch ( indices , index ) ; long value = ( pos >= 0 ) ? values [ pos ] : 0 ; return value ; }
|
Retrieve the value at the specified index or 0 if no value had been specified .
|
2,873
|
public long [ ] toPrimitiveArray ( long [ ] array ) { for ( int i = 0 , j = 0 ; i < array . length ; ++ i ) { int index = - 1 ; if ( j < indices . length && ( index = indices [ j ] ) == i ) { array [ i ] = values [ j ] ; j ++ ; } else array [ i ] = 0 ; } return array ; }
|
Sets the values of the provided array using the contents of this array . If the provided array is longer than this array the additional values are set to 0 .
|
2,874
|
private Function getFunction ( int exponent , int dimensions ) { if ( exponent == 0 ) { int [ ] func = new int [ dimensions ] ; for ( int i = 0 ; i < dimensions ; ++ i ) { func [ i ] = i ; } return new Function ( func , func ) ; } exponent = Math . abs ( exponent ) ; Function function = permutationToReordering . get ( exponent ) ; if ( function == null ) { synchronized ( this ) { function = permutationToReordering . get ( exponent ) ; if ( function == null ) { int priorExponent = exponent - 1 ; Function priorFunc = getFunction ( priorExponent , dimensions ) ; Integer [ ] objFunc = new Integer [ dimensions ] ; for ( int i = 0 ; i < dimensions ; ++ i ) { objFunc [ i ] = Integer . valueOf ( priorFunc . forward [ i ] ) ; } java . util . List < Integer > list = Arrays . asList ( objFunc ) ; Collections . shuffle ( list , RANDOM ) ; int [ ] forwardMapping = new int [ dimensions ] ; int [ ] backwardMapping = new int [ dimensions ] ; for ( int i = 0 ; i < dimensions ; ++ i ) { forwardMapping [ i ] = objFunc [ i ] . intValue ( ) ; backwardMapping [ objFunc [ i ] . intValue ( ) ] = i ; } System . out . printf ( "Forward: %s%nBackward: %s%n" , Arrays . toString ( forwardMapping ) , Arrays . toString ( backwardMapping ) ) ; function = new Function ( forwardMapping , backwardMapping ) ; permutationToReordering . put ( exponent , function ) ; } } } return function ; }
|
Returns the bijective mapping for each integer in the form of an array based on the current exponent of the permutation .
|
2,875
|
public V put ( K key , V value ) { V old = super . put ( key , value ) ; if ( size ( ) > bound ) { remove ( firstKey ( ) ) ; } return old ; }
|
Adds the key - value mapping to this map and if the total number of mappings exceeds the bounds removes either the currently lowest element or if reversed the currently highest element .
|
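The put above evicts the lowest key once the map outgrows its bound. A self-contained sketch of that behavior; `BoundedSortedMap`, its `bound` field, and its constructor are assumed names reconstructed around the excerpted method:

```java
import java.util.TreeMap;

// Hypothetical minimal stand-in matching the excerpted put() override.
class BoundedSortedMap<K, V> extends TreeMap<K, V> {
    private final int bound;
    BoundedSortedMap(int bound) { this.bound = bound; }
    @Override public V put(K key, V value) {
        V old = super.put(key, value);
        if (size() > bound)
            remove(firstKey()); // evict the lowest key once over the bound
        return old;
    }
}

public class BoundedMapDemo {
    public static void main(String[] args) {
        BoundedSortedMap<Double, String> top = new BoundedSortedMap<Double, String>(2);
        top.put(0.9, "a");
        top.put(0.5, "b");
        top.put(0.7, "c");       // over the bound: 0.5=b is evicted
        System.out.println(top); // {0.7=c, 0.9=a}
    }
}
```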
2,876
|
private void updateTimeRange ( long timestamp ) { if ( timestamp < startTime ) { startTime = timestamp ; } if ( timestamp > endTime ) { endTime = timestamp ; } }
|
Updates the start and end times if this time stamp exceeds either .
|
2,877
|
public String getDimensionDescription ( int dimension ) { if ( dimension < 0 || dimension >= basisMapping . numDimensions ( ) ) throw new IllegalArgumentException ( "Invalid dimension: " + dimension ) ; return basisMapping . getDimensionDescription ( dimension ) ; }
|
Returns a description of the dependency path feature to which the provided dimension is mapped .
|
2,878
|
private boolean acceptWord ( String word ) { return ! word . equals ( EMPTY_STRING ) && ( semanticFilter . isEmpty ( ) || semanticFilter . contains ( word ) ) ; }
|
Returns true if there is no semantic filter list or the word is in the filter list .
|
2,879
|
private void removeHtmlComments ( StringBuilder article ) { int htmlCommentStart = article . indexOf ( "<!--" ) ; while ( htmlCommentStart >= 0 ) { int htmlCommentEnd = article . indexOf ( "-->" , htmlCommentStart ) ; if ( htmlCommentEnd > htmlCommentStart ) article . delete ( htmlCommentStart , htmlCommentEnd + 3 ) ; else break ; htmlCommentStart = article . indexOf ( "<!--" , htmlCommentStart ) ; } }
|
Removes HTML comments from the article text
|
2,880
|
private int getTokenCount ( String article ) { Pattern notWhiteSpace = Pattern . compile ( "\\S+" ) ; Matcher matcher = notWhiteSpace . matcher ( article ) ; int tokens = 0 ; while ( matcher . find ( ) ) tokens ++ ; return tokens ; }
|
Returns the number of tokens in the article .
|
2,881
|
private long getIndex ( T x , T y ) { int i = elementIndices . index ( x ) ; int j = elementIndices . index ( y ) ; long index = ( ( ( long ) i ) << 32 ) | j ; return index ; }
|
Returns the concatenated index of the two elements .
|
2,882
|
public int getCount ( T x , T y ) { return counts . get ( getIndex ( x , y ) ) ; }
|
Returns the number of times the specified pair of objects has been seen by this counter .
|
2,883
|
public void reset ( ) { data . rewind ( ) ; data . getInt ( ) ; data . getInt ( ) ; data . getInt ( ) ; curCol = 0 ; entry = 0 ; try { advance ( ) ; } catch ( IOException ioe ) { throw new IOError ( ioe ) ; } }
|
Resets the iterator to the start of the file's data .
|
2,884
|
public static void save ( Object o , File file ) { try { FileOutputStream fos = new FileOutputStream ( file ) ; ObjectOutputStream outStream = new ObjectOutputStream ( new BufferedOutputStream ( fos ) ) ; outStream . writeObject ( o ) ; outStream . close ( ) ; } catch ( IOException ioe ) { throw new IOError ( ioe ) ; } }
|
Serializes the object to the provided file .
|
2,885
|
public static void save ( Object o , OutputStream stream ) { try { ObjectOutputStream outStream = ( stream instanceof ObjectOutputStream ) ? ( ObjectOutputStream ) stream : new ObjectOutputStream ( stream ) ; outStream . writeObject ( o ) ; } catch ( IOException ioe ) { throw new IOError ( ioe ) ; } }
|
Serializes the object to the provided stream . This method does not close the stream after writing .
|
2,886
|
@ SuppressWarnings ( "unchecked" ) public static < T > T load ( InputStream stream ) { try { ObjectInputStream inStream = ( stream instanceof ObjectInputStream ) ? ( ObjectInputStream ) stream : new ObjectInputStream ( stream ) ; T object = ( T ) inStream . readObject ( ) ; return object ; } catch ( IOException ioe ) { throw new IOError ( ioe ) ; } catch ( ClassNotFoundException cnfe ) { throw new IOError ( cnfe ) ; } }
|
Loads a serialized object of the specified type from the stream . This method does not close the stream after reading .
|
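A round-trip sketch combining the save and load methods above; checked-exception handling for opening the file is omitted, and note that load leaves the stream open so the caller must close it:

    File f = new File("counts.ser");
    Map<String, Integer> counts = new HashMap<String, Integer>();
    counts.put("foo", 3);
    save(counts, f);                            // serialize to disk
    FileInputStream in = new FileInputStream(f);
    Map<String, Integer> restored = load(in);   // read it back
    in.close();                                 // load does not close the stream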
2,887
|
public static < T > Iterator < T > join ( Collection < Iterable < T > > iterables ) { Queue < Iterator < T > > iters = new ArrayDeque < Iterator < T > > ( iterables . size ( ) ) ; for ( Iterable < T > i : iterables ) iters . add ( i . iterator ( ) ) ; return new CombinedIterator < T > ( iters ) ; }
|
Joins the iterators of all the provided iterables as one unified iterator .
|
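A usage sketch joining the iterators of two lists into a single traversal:

    List<Iterable<String>> parts = new ArrayList<Iterable<String>>();
    parts.add(Arrays.asList("a", "b"));
    parts.add(Arrays.asList("c"));
    Iterator<String> it = join(parts);
    while (it.hasNext())
        System.out.println(it.next());   // prints a, b, c in order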
2,888
|
private void advance ( ) { if ( current == null || ! current . hasNext ( ) ) { do { current = iters . poll ( ) ; } while ( current != null && ! current . hasNext ( ) ) ; } }
|
Moves to the next iterator in the queue if the current iterator is out of elements .
|
2,889
|
public synchronized T next ( ) { if ( current == null ) { throw new NoSuchElementException ( ) ; } T t = current . next ( ) ; if ( toRemoveFrom != current ) toRemoveFrom = current ; advance ( ) ; return t ; }
|
Returns the next element from some iterator .
|
2,890
|
public DoubleVector buildVector ( BufferedReader document , DoubleVector documentVector ) { Map < String , Integer > termCounts = new HashMap < String , Integer > ( ) ; Iterator < String > articleTokens = IteratorFactory . tokenize ( document ) ; while ( articleTokens . hasNext ( ) ) { String term = articleTokens . next ( ) ; Integer count = termCounts . get ( term ) ; termCounts . put ( term , ( count == null || ! useTermFreq ) ? 1 : count . intValue ( ) + 1 ) ; } Set < String > knownWords = sspace . getWords ( ) ; for ( Map . Entry < String , Integer > entry : termCounts . entrySet ( ) ) { if ( knownWords . contains ( entry . getKey ( ) ) ) { Vector termVector = sspace . getVector ( entry . getKey ( ) ) ; if ( termVector == null ) continue ; add ( documentVector , termVector , entry . getValue ( ) ) ; } } return documentVector ; }
|
Represents a document as the summation of term Vectors .
|
2,891
|
public Graph < Edge > readUndirectedFromWeighted ( File f , Indexer < String > vertexIndexer , double minWeight ) throws IOException { BufferedReader br = new BufferedReader ( new FileReader ( f ) ) ; Graph < Edge > g = new SparseUndirectedGraph ( ) ; int lineNo = 0 ; for ( String line = null ; ( line = br . readLine ( ) ) != null ; ) { ++ lineNo ; line = line . trim ( ) ; if ( line . startsWith ( "#" ) ) continue ; else if ( line . length ( ) == 0 ) continue ; String [ ] arr = line . split ( "\\s+" ) ; if ( arr . length < 2 ) throw new IOException ( "Missing vertex on line " + lineNo ) ; if ( arr . length < 3 ) throw new IOException ( "Missing edge weight on line " + lineNo ) ; int v1 = vertexIndexer . index ( arr [ 0 ] ) ; int v2 = vertexIndexer . index ( arr [ 1 ] ) ; double weight = Double . parseDouble ( arr [ 2 ] ) ; if ( weight >= minWeight ) g . add ( new SimpleEdge ( v1 , v2 ) ) ; if ( lineNo % 100000 == 0 ) veryVerbose ( LOGGER , "Read %d lines from %s" , lineNo , f ) ; } br . close ( ) ; verbose ( LOGGER , "Read undirected graph with %d vertices and %d edges" , g . order ( ) , g . size ( ) ) ; return g ; }
|
Reads in an undirected network from a file containing weighted edges , only keeping those undirected edges whose weight was at or above the specified threshold .
|
2,892
|
private static void clusterIteration ( Matrix matrix , int numClusters , KMeansSeed seedType , CriterionFunction criterion ) { DoubleVector [ ] centers = seedType . chooseSeeds ( numClusters , matrix ) ; int [ ] initialAssignments = new int [ matrix . rows ( ) ] ; if ( numClusters != 1 ) { int nc = 0 ; for ( int i = 0 ; i < matrix . rows ( ) ; ++ i ) { DoubleVector vector = matrix . getRowVector ( i ) ; double bestSimilarity = 0 ; for ( int c = 0 ; c < numClusters ; ++ c ) { double similarity = Similarity . cosineSimilarity ( centers [ c ] , vector ) ; nc ++ ; if ( similarity >= bestSimilarity ) { bestSimilarity = similarity ; initialAssignments [ i ] = c ; } } } } criterion . setup ( matrix , initialAssignments , numClusters ) ; List < Integer > indices = new ArrayList < Integer > ( matrix . rows ( ) ) ; for ( int i = 0 ; i < matrix . rows ( ) ; ++ i ) indices . add ( i ) ; boolean changed = true ; while ( changed ) { changed = false ; Collections . shuffle ( indices ) ; for ( int index : indices ) changed |= criterion . update ( index ) ; } }
|
Performs one iteration of Direct Clustering over the data set .
|
2,893
|
protected static Set < String > loadValidTermSet ( String validTermsFileName ) throws IOException { Set < String > validTerms = new HashSet < String > ( ) ; BufferedReader br = new BufferedReader ( new FileReader ( validTermsFileName ) ) ; for ( String line = null ; ( line = br . readLine ( ) ) != null ; ) { validTerms . add ( line ) ; } br . close ( ) ; return validTerms ; }
|
Returns a set of terms based on the contents of the provided file . Each word is expected to be on its own line .
|
2,894
|
private void retainOnly ( int columns ) { LOGGER . info ( "Sorting the columns by entropy and computing the top " + columns + " columns to retain" ) ; int words = termToIndex . numDimensions ( ) ; MultiMap < Double , Integer > entropyToIndex = new BoundedSortedMultiMap < Double , Integer > ( columns , false , true , true ) ; EntropyStats stats = MatrixEntropy . entropy ( cooccurrenceMatrix ) ; for ( int col = 0 ; col < words ; ++ col ) entropyToIndex . put ( stats . colEntropy [ col ] , col ) ; for ( int row = 0 ; row < words ; ++ row ) entropyToIndex . put ( stats . rowEntropy [ row ] , row + words ) ; Set < Integer > indicesToKeep = new HashSet < Integer > ( entropyToIndex . values ( ) ) ; LOGGER . info ( "Reducing to " + columns + " highest entropy columns." ) ; reduced = retainColumns ( indicesToKeep ) ; cooccurrenceMatrix = null ; }
|
Drops all but the specified number of columns , retaining those that have the highest information - theoretic entropy .
|
2,895
|
private void processWordsInNP ( ArrayList < Pair < String > > wordsInPhrase ) { if ( wordsInPhrase . size ( ) > 1 ) { for ( int i = 0 ; i < wordsInPhrase . size ( ) - 1 ; i ++ ) { if ( inStartSet ( wordsInPhrase . get ( i ) . x ) ) { for ( int j = i + 1 ; j < wordsInPhrase . size ( ) ; j ++ ) { if ( inReceiveSet ( wordsInPhrase . get ( j ) . x ) ) { wordRelationsWriter . println ( wordsInPhrase . get ( j ) . y + " " + wordsInPhrase . get ( i ) . y ) ; addRelation ( wordsInPhrase . get ( j ) . y , wordsInPhrase . get ( i ) . y ) ; } } } } } }
|
Creates relations between words in a noun phrase
|
2,896
|
protected Double computeAssociation ( SemanticSpace sspace , String word1 , String word2 ) { Vector v1 = sspace . getVector ( word1 ) ; Vector v2 = sspace . getVector ( word2 ) ; if ( v1 == null || v2 == null ) return null ; double rank1 = findRank ( sspace , word1 , word2 ) ; double rank2 = findRank ( sspace , word2 , word1 ) ; return 2d / ( rank1 + rank2 ) ; }
|
Returns the association of the two words on a scale of 0 to 1 .
|
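A worked instance of the symmetric rank score: if word2 is the 3rd nearest neighbor of word1 (rank1 = 3) and word1 is the 1st nearest neighbor of word2 (rank2 = 1), the association is 2 / (3 + 1) = 0.5; two words that are each other's nearest neighbors score the maximum of 2 / (1 + 1) = 1.

    double association = 2d / (3 + 1);   // == 0.5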
2,897
|
protected double computeScore ( double [ ] humanScores , double [ ] compScores ) { double average = 0 ; for ( double score : compScores ) average += score ; return average / compScores . length ; }
|
Returns the average computer - generated score on the Deese Antonymy test .
|
2,898
|
public static void bitreverse ( DoubleVector data , int i0 , int stride ) { int n = data . length ( ) ; for ( int i = 0 , j = 0 ; i < n - 1 ; i ++ ) { int k = n / 2 ; if ( i < j ) { double tmp = data . get ( i0 + stride * i ) ; data . set ( i0 + stride * i , data . get ( i0 + stride * j ) ) ; data . set ( i0 + stride * j , tmp ) ; } while ( k <= j ) { j = j - k ; k = k / 2 ; } j += k ; } }
|
This is the Gold - Rader bit - reversal algorithm .
|
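To make the permutation concrete, for n = 8 each index is swapped with its 3-bit mirror image, so 1 (001) exchanges with 4 (100) and 3 (011) with 6 (110), while palindromic indices stay put. A standalone sketch of the expected mapping, independent of the DoubleVector interface:

    int n = 8, bits = 3;
    for (int i = 0; i < n; i++) {
        int r = Integer.reverse(i) >>> (32 - bits);   // 3-bit reversal of i
        System.out.printf("%d -> %d%n", i, r);        // 0->0, 1->4, 2->2, 3->6, ...
    }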
2,899
|
protected int getIndexFromMap ( int [ ] maskMap , int index ) { if ( index < 0 || index >= maskMap . length ) throw new IndexOutOfBoundsException ( "The given index is beyond the bounds of the matrix" ) ; int newIndex = maskMap [ index ] ; if ( newIndex < 0 || maskMap == rowMaskMap && newIndex >= matrix . rows ( ) || maskMap == colMaskMap && newIndex >= matrix . columns ( ) ) throw new IndexOutOfBoundsException ( "The mapped index is beyond the bounds of the base matrix" ) ; return newIndex ; }
|
Returns the new index value for a given index from a given mapping , throwing an IndexOutOfBoundsException if the index is outside the mapping or has no valid mapping into the base matrix .
|
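A small sketch of how a mask map translates indices: a row mask of {2, 0} exposes base rows 2 and 0 as masked rows 0 and 1, and getIndexFromMap simply dereferences the array after the bounds checks:

    int[] rowMaskMap = {2, 0};
    int baseRow = rowMaskMap[0];   // masked row 0 reads from base row 2
    int other   = rowMaskMap[1];   // masked row 1 reads from base row 0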