| idx (int64, 0 to 41.2k) | question (stringlengths 74 to 4.04k) | target (stringlengths 7 to 750) |
|---|---|---|
38,100
|
public static int getWeekdayOfDate ( String date ) { SimpleDateFormat formatter = new SimpleDateFormat ( "yyyy-MM-dd" ) ; int weekday = 0 ; Calendar c = Calendar . getInstance ( ) ; try { c . setTime ( formatter . parse ( date ) ) ; weekday = c . get ( Calendar . DAY_OF_WEEK ) ; } catch ( ParseException e ) { e . printStackTrace ( ) ; } return weekday ; }
|
Get the weekday of date
|
38,101
|
public static int getWeekOfDate ( String date ) { SimpleDateFormat formatter = new SimpleDateFormat ( "yyyy-MM-dd" ) ; int week = 0 ; ; Calendar c = Calendar . getInstance ( ) ; try { c . setTime ( formatter . parse ( date ) ) ; week = c . get ( Calendar . WEEK_OF_YEAR ) ; } catch ( ParseException e ) { e . printStackTrace ( ) ; } return week ; }
|
Get the week of date
|
38,102
|
public static Locale getLocaleFromString ( String locale ) throws LocaleException { for ( Locale l : Locale . getAvailableLocales ( ) ) { if ( locale . toLowerCase ( ) . equals ( l . toString ( ) . toLowerCase ( ) ) ) { return l ; } } throw new LocaleException ( ) ; }
|
takes a desired locale input string , iterates through available locales , and returns a locale object
|
38,103
|
public void allocateMemory ( int memorySize ) { this . memorySize = memorySize ; memory = new PairDblInt [ memorySize ] [ numLabels ] ; for ( int i = 0 ; i < memorySize ; i ++ ) { for ( int j = 0 ; j < numLabels ; j ++ ) { memory [ i ] [ j ] = new PairDblInt ( ) ; } } }
|
Allocate memory .
|
38,104
|
public void computeMi ( ) { Mi . assign ( 0.0 ) ; model . taggerFGen . startScanEFeatures ( ) ; while ( model . taggerFGen . hasNextEFeature ( ) ) { Feature f = model . taggerFGen . nextEFeature ( ) ; if ( f . ftype == Feature . EDGE_FEATURE1 ) { Mi . mtrx [ f . yp ] [ f . y ] += model . lambda [ f . idx ] * f . val ; } } for ( int i = 0 ; i < Mi . rows ; i ++ ) { for ( int j = 0 ; j < Mi . cols ; j ++ ) { Mi . mtrx [ i ] [ j ] = Math . exp ( Mi . mtrx [ i ] [ j ] ) ; } } }
|
Compute mi .
|
38,105
|
public void computeVi ( List seq , int pos , DoubleVector Vi , boolean isExp ) { Vi . assign ( 0.0 ) ; model . taggerFGen . startScanSFeaturesAt ( seq , pos ) ; while ( model . taggerFGen . hasNextSFeature ( ) ) { Feature f = model . taggerFGen . nextSFeature ( ) ; if ( f . ftype == Feature . STAT_FEATURE1 ) { Vi . vect [ f . y ] += model . lambda [ f . idx ] * f . val ; } } if ( isExp ) { for ( int i = 0 ; i < Vi . len ; i ++ ) { Vi . vect [ i ] = Math . exp ( Vi . vect [ i ] ) ; } } }
|
Compute vi .
|
38,106
|
public int findMax ( PairDblInt [ ] cols ) { int maxIdx = 0 ; double maxVal = - 1.0 ; for ( int i = 0 ; i < numLabels ; i ++ ) { if ( cols [ i ] . first > maxVal ) { maxVal = cols [ i ] . first ; maxIdx = i ; } } return maxIdx ; }
|
Find max .
|
38,107
|
public void viterbiInference ( List seq ) { int i , j , k ; int seqLen = seq . size ( ) ; if ( seqLen <= 0 ) { return ; } if ( memorySize < seqLen ) { allocateMemory ( seqLen ) ; } computeVi ( seq , 0 , Vi , true ) ; for ( j = 0 ; j < numLabels ; j ++ ) { memory [ 0 ] [ j ] . first = Vi . vect [ j ] ; memory [ 0 ] [ j ] . second = j ; } divide ( memory [ 0 ] , sum ( memory [ 0 ] ) ) ; for ( i = 1 ; i < seqLen ; i ++ ) { computeVi ( seq , i , Vi , true ) ; for ( j = 0 ; j < numLabels ; j ++ ) { memory [ i ] [ j ] . first = 0.0 ; memory [ i ] [ j ] . second = 0 ; for ( k = 0 ; k < numLabels ; k ++ ) { double tempVal = memory [ i - 1 ] [ k ] . first * Mi . mtrx [ k ] [ j ] * Vi . vect [ j ] ; if ( tempVal > memory [ i ] [ j ] . first ) { memory [ i ] [ j ] . first = tempVal ; memory [ i ] [ j ] . second = k ; } } } divide ( memory [ i ] , sum ( memory [ i ] ) ) ; } int maxIdx = findMax ( memory [ seqLen - 1 ] ) ; ( ( Observation ) seq . get ( seqLen - 1 ) ) . modelLabel = maxIdx ; for ( i = seqLen - 2 ; i >= 0 ; i -- ) { ( ( Observation ) seq . get ( i ) ) . modelLabel = memory [ i + 1 ] [ maxIdx ] . second ; maxIdx = ( ( Observation ) seq . get ( i ) ) . modelLabel ; } }
|
Viterbi inference .
|
38,108
|
private void tokenize ( JCas jcas ) { Logger . printDetail ( component , "TreeTagger (tokenization) with: " + ttprops . abbFileName ) ; EnumSet < Flag > flags = Flag . getSet ( ttprops . languageSwitch ) ; TreeTaggerTokenizer ttt ; ttprops . abbFileName = "english-abbreviations" ; if ( ttprops . abbFileName != null ) { ttt = new TreeTaggerTokenizer ( ttprops . rootPath + ttprops . fileSeparator + "lib" + ttprops . fileSeparator + ttprops . abbFileName , flags ) ; } else { ttt = new TreeTaggerTokenizer ( null , flags ) ; } String docText = jcas . getDocumentText ( ) . replaceAll ( "\n\n" , "\nEMPTYLINE\n" ) ; List < String > tokenized = ttt . tokenize ( docText ) ; int tokenOffset = 0 ; for ( String s : tokenized ) { if ( ( ! ( s . equals ( "EMPTYLINE" ) ) ) && ( jcas . getDocumentText ( ) . indexOf ( s , tokenOffset ) < 0 ) ) { Logger . printError ( component , "Tokenization was interrupted because the token \"" + s + "\" could not be found in the original text. The reason for this might be " + "that the encoding of the document is not UTF-8. This token was skipped and " + "if it was part of a temporal expression, will not be extracted." ) ; continue ; } Token newToken = new Token ( jcas ) ; if ( s . equals ( "EMPTYLINE" ) ) { newToken . setBegin ( tokenOffset ) ; newToken . setEnd ( tokenOffset ) ; newToken . setPos ( "EMPTYLINE" ) ; if ( annotate_partofspeech ) { newToken . addToIndexes ( ) ; } } else { newToken . setBegin ( jcas . getDocumentText ( ) . indexOf ( s , tokenOffset ) ) ; newToken . setEnd ( newToken . getBegin ( ) + s . length ( ) ) ; newToken . addToIndexes ( ) ; tokenOffset = newToken . getEnd ( ) ; } } }
|
tokenizes a given JCas object s document text using the treetagger program and adds the recognized tokens to the JCas object .
|
38,109
|
private void tokenizeChinese ( JCas jcas ) { try { Process proc = ttprops . getChineseTokenizationProcess ( ) ; Logger . printDetail ( component , "Chinese tokenization: " + ttprops . chineseTokenizerPath ) ; BufferedReader in = new BufferedReader ( new InputStreamReader ( proc . getInputStream ( ) , "UTF-8" ) ) ; BufferedWriter out = new BufferedWriter ( new OutputStreamWriter ( proc . getOutputStream ( ) , "UTF-8" ) ) ; Integer tokenOffset = 0 ; String [ ] inSplits = jcas . getDocumentText ( ) . split ( "[\\r\\n]+" ) ; for ( String inSplit : inSplits ) { out . write ( inSplit ) ; out . newLine ( ) ; out . flush ( ) ; String s = in . readLine ( ) ; do { if ( s == null ) break ; String [ ] outSplits = s . split ( "\\s+" ) ; for ( String tok : outSplits ) { if ( jcas . getDocumentText ( ) . indexOf ( tok , tokenOffset ) < 0 ) throw new RuntimeException ( "Could not find token " + tok + " in JCas after tokenizing with Chinese tokenization script." ) ; Token newToken = new Token ( jcas ) ; newToken . setBegin ( jcas . getDocumentText ( ) . indexOf ( tok , tokenOffset ) ) ; newToken . setEnd ( newToken . getBegin ( ) + tok . length ( ) ) ; newToken . addToIndexes ( ) ; tokenOffset = newToken . getEnd ( ) ; } if ( ! in . ready ( ) ) break ; s = in . readLine ( ) ; } while ( true ) ; } in . close ( ) ; proc . destroy ( ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; } }
|
tokenizes a given JCas object s document text using the chinese tokenization script and adds the recognized tokens to the JCas object .
|
38,110
|
public static void readAbbrList ( String dataFile , Map map ) throws IOException { BufferedReader fin = new BufferedReader ( new FileReader ( dataFile ) ) ; String line ; while ( ( line = fin . readLine ( ) ) != null ) { StringTokenizer strTok = new StringTokenizer ( line , " \t\r\n" ) ; if ( strTok . countTokens ( ) <= 0 ) { continue ; } String token = strTok . nextToken ( ) ; map . put ( token . toLowerCase ( ) , token . toLowerCase ( ) ) ; } }
|
Read abbr list .
|
38,111
|
public static List doFeatureGen ( Map map , String text , List markList , boolean label ) { markList . clear ( ) ; int nextPos = 0 ; while ( ( nextPos = StringUtils . findFirstOf ( text , ".!?" , nextPos + 1 ) ) != - 1 ) markList . add ( new Integer ( nextPos ) ) ; List results = new ArrayList ( ) ; for ( int i = 0 ; i < markList . size ( ) ; ++ i ) { int curPos = ( ( Integer ) markList . get ( i ) ) . intValue ( ) ; String record = genCPs ( map , text , curPos ) ; if ( label ) { int idx = StringUtils . findFirstNotOf ( text , " \t" , curPos + 1 ) ; if ( idx == - 1 || ( text . charAt ( idx ) == '\n' ) ) { record += " " + "y" ; } else record += " " + "n" ; } results . add ( record ) ; } return results ; }
|
Generate context predicates for a specified text ; return string representing the context predicates .
|
38,112
|
public void initialize ( Language language , DocumentType typeToProcess , OutputType outputType , String configPath ) { initialize ( language , typeToProcess , outputType , configPath , POSTagger . TREETAGGER ) ; }
|
Method that initializes all vital prerequisites
|
38,113
|
private void runIntervalTagger ( JCas jcas ) { logger . log ( Level . FINEST , "Running Interval Tagger..." ) ; Integer beforeAnnotations = jcas . getAnnotationIndex ( ) . size ( ) ; Properties settings = new Properties ( ) ; settings . put ( IntervalTagger . PARAM_LANGUAGE , language . getResourceFolder ( ) ) ; settings . put ( IntervalTagger . PARAM_INTERVALS , true ) ; settings . put ( IntervalTagger . PARAM_INTERVAL_CANDIDATES , false ) ; IntervalTaggerWrapper iTagger = new IntervalTaggerWrapper ( ) ; iTagger . initialize ( settings ) ; iTagger . process ( jcas ) ; Integer afterAnnotations = jcas . getAnnotationIndex ( ) . size ( ) ; logger . log ( Level . FINEST , "Annotation delta: " + ( afterAnnotations - beforeAnnotations ) ) ; }
|
Runs the IntervalTagger on the JCAS object .
|
38,114
|
public Process getTokenizationProcess ( File inputFile ) throws IOException { ArrayList < String > command = new ArrayList < String > ( ) ; command . add ( "perl" ) ; if ( this . utf8Switch != "" ) command . add ( this . utf8Switch ) ; command . add ( this . rootPath + this . fileSeparator + "cmd" + this . fileSeparator + this . tokScriptName ) ; if ( this . languageSwitch != "" ) command . add ( this . languageSwitch ) ; if ( new File ( this . rootPath + this . fileSeparator + "lib" + this . fileSeparator , this . abbFileName ) . exists ( ) ) { command . add ( "-a" ) ; command . add ( this . rootPath + this . fileSeparator + "lib" + this . fileSeparator + this . abbFileName ) ; } command . add ( inputFile . getAbsolutePath ( ) ) ; String [ ] commandStr = new String [ command . size ( ) ] ; command . toArray ( commandStr ) ; Process p = Runtime . getRuntime ( ) . exec ( commandStr ) ; return p ; }
|
This method creates a process with some parameters for the tokenizer script .
|
38,115
|
public void readDict ( BufferedReader fin ) throws IOException { dict . clear ( ) ; String line ; if ( ( line = fin . readLine ( ) ) == null ) { System . out . println ( "No dictionary size information" ) ; return ; } int dictSize = Integer . parseInt ( line ) ; if ( dictSize <= 0 ) { System . out . println ( "Invalid dictionary size" ) ; } System . out . println ( "Reading dictionary ..." ) ; for ( int i = 0 ; i < dictSize ; i ++ ) { line = fin . readLine ( ) ; if ( line == null ) { System . out . println ( "Invalid dictionary line" ) ; return ; } StringTokenizer strTok = new StringTokenizer ( line , " \t\r\n" ) ; int len = strTok . countTokens ( ) ; if ( len < 2 ) { continue ; } StringTokenizer cpTok = new StringTokenizer ( strTok . nextToken ( ) , ":" ) ; int cp = Integer . parseInt ( cpTok . nextToken ( ) ) ; int cpCount = Integer . parseInt ( cpTok . nextToken ( ) ) ; Element elem = new Element ( ) ; elem . count = cpCount ; elem . chosen = 1 ; while ( strTok . hasMoreTokens ( ) ) { StringTokenizer lbTok = new StringTokenizer ( strTok . nextToken ( ) , ":" ) ; int label = Integer . parseInt ( lbTok . nextToken ( ) ) ; int count = Integer . parseInt ( lbTok . nextToken ( ) ) ; int fidx = Integer . parseInt ( lbTok . nextToken ( ) ) ; CountFIdx cntFIdx = new CountFIdx ( count , fidx ) ; elem . lbCntFidxes . put ( new Integer ( label ) , cntFIdx ) ; } dict . put ( new Integer ( cp ) , elem ) ; } System . out . println ( "Reading dictionary (" + Integer . toString ( dict . size ( ) ) + " entries) completed!" ) ; line = fin . readLine ( ) ; }
|
Read dict .
|
38,116
|
public void writeDict ( PrintWriter fout ) throws IOException { Iterator it = null ; int count = 0 ; for ( it = dict . keySet ( ) . iterator ( ) ; it . hasNext ( ) ; ) { Integer cpInt = ( Integer ) it . next ( ) ; Element elem = ( Element ) dict . get ( cpInt ) ; if ( elem . chosen == 1 ) { count ++ ; } } fout . println ( Integer . toString ( count ) ) ; for ( it = dict . keySet ( ) . iterator ( ) ; it . hasNext ( ) ; ) { Integer cpInt = ( Integer ) it . next ( ) ; Element elem = ( Element ) dict . get ( cpInt ) ; if ( elem . chosen == 0 ) { continue ; } fout . print ( cpInt . toString ( ) + ":" + Integer . toString ( elem . count ) ) ; for ( Iterator lbIt = elem . lbCntFidxes . keySet ( ) . iterator ( ) ; lbIt . hasNext ( ) ; ) { Integer labelInt = ( Integer ) lbIt . next ( ) ; CountFIdx cntFIdx = ( CountFIdx ) elem . lbCntFidxes . get ( labelInt ) ; if ( cntFIdx . fidx < 0 ) { continue ; } fout . print ( " " + labelInt . toString ( ) + ":" + Integer . toString ( cntFIdx . count ) + ":" + Integer . toString ( cntFIdx . fidx ) ) ; } fout . println ( ) ; } fout . println ( Option . modelSeparator ) ; }
|
Write dict .
|
38,117
|
public void addDict ( int cp , int label , int count ) { Element elem = ( Element ) dict . get ( new Integer ( cp ) ) ; if ( elem == null ) { elem = new Element ( ) ; elem . count = count ; CountFIdx cntFIdx = new CountFIdx ( count , - 1 ) ; elem . lbCntFidxes . put ( new Integer ( label ) , cntFIdx ) ; dict . put ( new Integer ( cp ) , elem ) ; } else { elem . count += count ; CountFIdx cntFIdx = ( CountFIdx ) elem . lbCntFidxes . get ( new Integer ( label ) ) ; if ( cntFIdx == null ) { cntFIdx = new CountFIdx ( count , - 1 ) ; elem . lbCntFidxes . put ( new Integer ( label ) , cntFIdx ) ; } else { cntFIdx . count += count ; } } }
|
Adds the dict .
|
38,118
|
public void generateDict ( ) { if ( data . trnData == null ) { System . out . println ( "No data available for generating dictionary" ) ; return ; } for ( int i = 0 ; i < data . trnData . size ( ) ; i ++ ) { Observation obsr = ( Observation ) data . trnData . get ( i ) ; for ( int j = 0 ; j < obsr . cps . length ; j ++ ) { addDict ( obsr . cps [ j ] , obsr . humanLabel , 1 ) ; } } }
|
Generate dict .
|
38,119
|
public static List < String > getTokens ( String sentence ) { List < String > tokens = new ArrayList < String > ( ) ; StringBuilder buff = new StringBuilder ( ) ; for ( char c : sentence . toCharArray ( ) ) { if ( buff . length ( ) > 0 && spaces . indexOf ( c ) != - 1 ) { tokens . add ( buff . toString ( ) ) ; buff . setLength ( 0 ) ; } else if ( punctuation . indexOf ( c ) != - 1 ) { if ( buff . length ( ) > 0 ) { tokens . add ( buff . toString ( ) ) ; buff . setLength ( 0 ) ; } tokens . add ( String . valueOf ( c ) ) ; } else if ( spaces . indexOf ( c ) == - 1 ) { buff . append ( c ) ; } } if ( buff . length ( ) > 0 ) { tokens . add ( buff . toString ( ) ) ; } return tokens ; }
|
Takes a string and returns a List of all tokens contained within it . Any non - whitespace string of characters delimited by punctuation or whitespace is considered a token . Likewise every instance of a punctuation character is also a token .
|
38,120
|
public Integer addTokenAnnotation ( String tokenString , String fileId , Integer sentId , Integer tokId , Integer positionCounter , JCas jcas ) { Token token = new Token ( jcas ) ; if ( ! ( ( sentId == newTokSentNumber ) && ( tokId == newTokSentNumber ) ) ) { if ( USE_SPACES ) positionCounter = positionCounter + 1 ; } token . setBegin ( positionCounter ) ; positionCounter = positionCounter + tokenString . length ( ) ; token . setEnd ( positionCounter ) ; token . setTokenId ( tokId ) ; token . setSentId ( sentId ) ; token . setFilename ( fileId ) ; token . addToIndexes ( ) ; String id = fileId + "_" + sentId + "_" + tokId ; hmToken . put ( id , token ) ; return positionCounter ; }
|
Add token annotation to jcas
|
38,121
|
private Integer getNumberOfDocuments ( List < File > inputFiles ) throws ResourceInitializationException { String directory = ( String ) getConfigParameterValue ( PARAM_INPUTDIR ) ; String filename = directory + "/" + FILE_BASE_SEGMENTATION ; for ( File file : inputFiles ) { if ( file . getAbsolutePath ( ) . equals ( filename ) ) { try { String line ; BufferedReader bf = new BufferedReader ( new InputStreamReader ( new FileInputStream ( file ) , charset ) ) ; while ( ( line = bf . readLine ( ) ) != null ) { String docName = ( line . split ( "\t" ) ) [ 0 ] ; if ( ! ( filenames . contains ( docName ) ) ) { filenames . add ( docName ) ; } } bf . close ( ) ; } catch ( IOException e ) { throw new ResourceInitializationException ( e ) ; } } } int docCounter = filenames . size ( ) ; return docCounter ; }
|
count the number of different documents and save doc names in filenames
|
38,122
|
public int strId2Idx ( Map fmap ) { Integer idxInt = ( Integer ) fmap . get ( strId ) ; if ( idxInt != null ) { this . idx = idxInt . intValue ( ) ; } return this . idx ; }
|
Str id2 idx .
|
38,123
|
public int strId2IdxAdd ( Map fmap ) { strId2Idx ( fmap ) ; if ( idx < 0 ) { idx = fmap . size ( ) ; fmap . put ( strId , new Integer ( idx ) ) ; } return idx ; }
|
Str id2 idx add .
|
38,124
|
private void readRePatternResources ( ResourceMap hmResourcesRePattern , Boolean load_temponym_resources ) { InputStream is = null ; InputStreamReader isr = null ; BufferedReader br = null ; try { for ( String resource : hmResourcesRePattern . keySet ( ) ) { if ( ( ! ( resource . contains ( "Temponym" ) ) ) || ( ( load_temponym_resources ) && ( resource . contains ( "Temponym" ) ) ) ) { Logger . printDetail ( component , "Adding pattern resource: " + resource ) ; is = hmResourcesRePattern . getInputStream ( resource ) ; isr = new InputStreamReader ( is , "UTF-8" ) ; br = new BufferedReader ( isr ) ; LinkedList < String > patterns = new LinkedList < String > ( ) ; for ( String line ; ( line = br . readLine ( ) ) != null ; ) { if ( ! line . startsWith ( "//" ) && ! line . equals ( "" ) ) { patterns . add ( replaceSpaces ( line ) ) ; } } Collections . sort ( patterns , new Comparator < String > ( ) { public int compare ( String o1 , String o2 ) { String o1effective = o1 . replaceAll ( "\\[[^\\]]*\\]" , "X" ) . replaceAll ( "\\?" , "" ) . replaceAll ( "\\\\.(?:\\{([^\\}])+\\})?" , "X$1" ) ; String o2effective = o2 . replaceAll ( "\\[[^\\]]*\\]" , "X" ) . replaceAll ( "\\?" , "" ) . replaceAll ( "\\\\.(?:\\{([^\\}])+\\})?" , "X$1" ) ; if ( o1effective . length ( ) < o2effective . length ( ) ) return 1 ; else if ( o1effective . length ( ) > o2effective . length ( ) ) return - 1 ; else return 0 ; } } ) ; StringBuilder sb = new StringBuilder ( ) ; String devPattern = "" ; for ( String pat : patterns ) { sb . append ( "|" ) ; sb . append ( pat ) ; } devPattern = sb . toString ( ) ; hmAllRePattern . put ( resource , devPattern ) ; } else { Logger . printDetail ( component , "No Temponym Tagging selected. Skipping pattern resource: " + resource ) ; } } for ( String which : hmAllRePattern . keySet ( ) ) { if ( ( ! ( which . contains ( "Temponym" ) ) ) || ( ( load_temponym_resources ) && ( which . contains ( "Temponym" ) ) ) ) { finalizeRePattern ( which , hmAllRePattern . get ( which ) ) ; } } } catch ( IOException e ) { e . printStackTrace ( ) ; } finally { try { if ( br != null ) { br . close ( ) ; } if ( isr != null ) { isr . close ( ) ; } if ( is != null ) { is . close ( ) ; } } catch ( Exception e ) { e . printStackTrace ( ) ; } } }
|
READ THE REPATTERN FROM THE FILES . The files have to be defined in the HashMap hmResourcesRePattern .
|
38,125
|
private void finalizeRePattern ( String name , String rePattern ) { rePattern = rePattern . replaceFirst ( "\\|" , "" ) ; rePattern = rePattern . replaceAll ( "\\(([^\\?])" , "(?:$1" ) ; rePattern = "(" + rePattern + ")" ; rePattern = rePattern . replaceAll ( "\\\\" , "\\\\\\\\" ) ; hmAllRePattern . put ( name , rePattern ) ; }
|
Pattern containing regular expression is finalized i . e . created correctly and added to hmAllRePattern .
|
38,126
|
public int getSentId ( ) { if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_sentId == null ) jcasType . jcas . throwFeatMissing ( "sentId" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; return jcasType . ll_cas . ll_getIntValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_sentId ) ; }
|
getter for sentId - gets
|
38,127
|
public int getFirstTokId ( ) { if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_firstTokId == null ) jcasType . jcas . throwFeatMissing ( "firstTokId" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; return jcasType . ll_cas . ll_getIntValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_firstTokId ) ; }
|
getter for firstTokId - gets
|
38,128
|
public void setFirstTokId ( int v ) { if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_firstTokId == null ) jcasType . jcas . throwFeatMissing ( "firstTokId" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; jcasType . ll_cas . ll_setIntValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_firstTokId , v ) ; }
|
setter for firstTokId - sets
|
38,129
|
public String getAllTokIds ( ) { if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_allTokIds == null ) jcasType . jcas . throwFeatMissing ( "allTokIds" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; return jcasType . ll_cas . ll_getStringValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_allTokIds ) ; }
|
getter for allTokIds - gets
|
38,130
|
public void setAllTokIds ( String v ) { if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_allTokIds == null ) jcasType . jcas . throwFeatMissing ( "allTokIds" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; jcasType . ll_cas . ll_setStringValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_allTokIds , v ) ; }
|
setter for allTokIds - sets
|
38,131
|
public int getTimexInstance ( ) { if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_timexInstance == null ) jcasType . jcas . throwFeatMissing ( "timexInstance" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; return jcasType . ll_cas . ll_getIntValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_timexInstance ) ; }
|
getter for timexInstance - gets
|
38,132
|
public void setTimexInstance ( int v ) { if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_timexInstance == null ) jcasType . jcas . throwFeatMissing ( "timexInstance" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; jcasType . ll_cas . ll_setIntValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_timexInstance , v ) ; }
|
setter for timexInstance - sets
|
38,133
|
public String getTimexType ( ) { if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_timexType == null ) jcasType . jcas . throwFeatMissing ( "timexType" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; return jcasType . ll_cas . ll_getStringValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_timexType ) ; }
|
getter for timexType - gets
|
38,134
|
public void setTimexType ( String v ) { if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_timexType == null ) jcasType . jcas . throwFeatMissing ( "timexType" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; jcasType . ll_cas . ll_setStringValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_timexType , v ) ; }
|
setter for timexType - sets
|
38,135
|
public String getTimexValue ( ) { if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_timexValue == null ) jcasType . jcas . throwFeatMissing ( "timexValue" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; return jcasType . ll_cas . ll_getStringValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_timexValue ) ; }
|
getter for timexValue - gets
|
38,136
|
public void setTimexValue ( String v ) { if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_timexValue == null ) jcasType . jcas . throwFeatMissing ( "timexValue" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; jcasType . ll_cas . ll_setStringValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_timexValue , v ) ; }
|
setter for timexValue - sets
|
38,137
|
public String getFoundByRule ( ) { if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_foundByRule == null ) jcasType . jcas . throwFeatMissing ( "foundByRule" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; return jcasType . ll_cas . ll_getStringValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_foundByRule ) ; }
|
getter for foundByRule - gets
|
38,138
|
public void setFoundByRule ( String v ) { if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_foundByRule == null ) jcasType . jcas . throwFeatMissing ( "foundByRule" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; jcasType . ll_cas . ll_setStringValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_foundByRule , v ) ; }
|
setter for foundByRule - sets
|
38,139
|
public String getTimexQuant ( ) { if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_timexQuant == null ) jcasType . jcas . throwFeatMissing ( "timexQuant" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; return jcasType . ll_cas . ll_getStringValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_timexQuant ) ; }
|
getter for timexQuant - gets
|
38,140
|
public void setTimexQuant ( String v ) { if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_timexQuant == null ) jcasType . jcas . throwFeatMissing ( "timexQuant" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; jcasType . ll_cas . ll_setStringValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_timexQuant , v ) ; }
|
setter for timexQuant - sets
|
38,141
|
public String getTimexFreq ( ) { if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_timexFreq == null ) jcasType . jcas . throwFeatMissing ( "timexFreq" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; return jcasType . ll_cas . ll_getStringValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_timexFreq ) ; }
|
getter for timexFreq - gets
|
38,142
|
public void setTimexFreq ( String v ) { if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_timexFreq == null ) jcasType . jcas . throwFeatMissing ( "timexFreq" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; jcasType . ll_cas . ll_setStringValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_timexFreq , v ) ; }
|
setter for timexFreq - sets
|
38,143
|
public String getTimexMod ( ) { if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_timexMod == null ) jcasType . jcas . throwFeatMissing ( "timexMod" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; return jcasType . ll_cas . ll_getStringValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_timexMod ) ; }
|
getter for timexMod - gets
|
38,144
|
public void setTimexMod ( String v ) { if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_timexMod == null ) jcasType . jcas . throwFeatMissing ( "timexMod" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; jcasType . ll_cas . ll_setStringValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_timexMod , v ) ; }
|
setter for timexMod - sets
|
38,145
|
public void generateFeatures ( ) { if ( features != null ) { features . clear ( ) ; } else { features = new ArrayList ( ) ; } if ( fmap != null ) { fmap . clear ( ) ; } else { fmap = new HashMap ( ) ; } if ( currentFeatures != null ) { currentFeatures . clear ( ) ; } else { currentFeatures = new ArrayList ( ) ; } if ( data . trnData == null || dict . dict == null ) { System . out . println ( "No data or dictionary for generating features" ) ; return ; } for ( int i = 0 ; i < data . trnData . size ( ) ; i ++ ) { Observation obsr = ( Observation ) data . trnData . get ( i ) ; for ( int j = 0 ; j < obsr . cps . length ; j ++ ) { Element elem = null ; CountFIdx cntFIdx = null ; elem = ( Element ) dict . dict . get ( new Integer ( obsr . cps [ j ] ) ) ; if ( elem != null ) { if ( elem . count <= option . cpRareThreshold ) { continue ; } cntFIdx = ( CountFIdx ) elem . lbCntFidxes . get ( new Integer ( obsr . humanLabel ) ) ; if ( cntFIdx != null ) { if ( cntFIdx . count <= option . fRareThreshold ) { continue ; } } else { continue ; } } else { continue ; } Feature f = new Feature ( obsr . humanLabel , obsr . cps [ j ] ) ; f . strId2Idx ( fmap ) ; if ( f . idx < 0 ) { addFeature ( f ) ; cntFIdx . fidx = f . idx ; elem . chosen = 1 ; } } } option . numFeatures = features . size ( ) ; }
|
Generate features .
|
38,146
|
public void readFeatures ( BufferedReader fin ) throws IOException { if ( features != null ) { features . clear ( ) ; } else { features = new ArrayList ( ) ; } if ( fmap != null ) { fmap . clear ( ) ; } else { fmap = new HashMap ( ) ; } if ( currentFeatures != null ) { currentFeatures . clear ( ) ; } else { currentFeatures = new ArrayList ( ) ; } String line ; if ( ( line = fin . readLine ( ) ) == null ) { System . out . println ( "Unknown number of features" ) ; return ; } int numFeatures = Integer . parseInt ( line ) ; if ( numFeatures <= 0 ) { System . out . println ( "Invalid number of features" ) ; return ; } System . out . println ( "Reading features ..." ) ; for ( int i = 0 ; i < numFeatures ; i ++ ) { line = fin . readLine ( ) ; if ( line == null ) { continue ; } StringTokenizer strTok = new StringTokenizer ( line , " " ) ; if ( strTok . countTokens ( ) != 4 ) { System . out . println ( i + " invalid feature line " ) ; continue ; } Feature f = new Feature ( line , data . cpStr2Int , data . lbStr2Int ) ; Integer fidx = ( Integer ) fmap . get ( f . strId ) ; if ( fidx == null ) { fmap . put ( f . strId , new Integer ( f . idx ) ) ; features . add ( f ) ; } else { fmap . put ( f . strId , new Integer ( f . idx ) ) ; features . add ( f ) ; } } System . out . println ( "Reading " + Integer . toString ( features . size ( ) ) + " features completed!" ) ; line = fin . readLine ( ) ; option . numFeatures = features . size ( ) ; }
|
Read features .
|
38,147
|
public void writeFeatures ( PrintWriter fout ) throws IOException { fout . println ( Integer . toString ( features . size ( ) ) ) ; for ( int i = 0 ; i < features . size ( ) ; i ++ ) { Feature f = ( Feature ) features . get ( i ) ; fout . println ( f . toString ( data . cpInt2Str , data . lbInt2Str ) ) ; } fout . println ( Option . modelSeparator ) ; }
|
Write features .
|
38,148
|
public void startScanFeatures ( Observation obsr ) { currentFeatures . clear ( ) ; currentFeatureIdx = 0 ; for ( int i = 0 ; i < obsr . cps . length ; i ++ ) { Element elem = ( Element ) dict . dict . get ( new Integer ( obsr . cps [ i ] ) ) ; if ( elem == null ) { continue ; } if ( ! ( elem . isScanned ) ) { Iterator it = elem . lbCntFidxes . keySet ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { Integer labelInt = ( Integer ) it . next ( ) ; CountFIdx cntFIdx = ( CountFIdx ) elem . lbCntFidxes . get ( labelInt ) ; if ( cntFIdx . fidx >= 0 ) { Feature f = new Feature ( ) ; f . FeatureInit ( labelInt . intValue ( ) , obsr . cps [ i ] ) ; f . idx = cntFIdx . fidx ; elem . cpFeatures . add ( f ) ; } } elem . isScanned = true ; } for ( int j = 0 ; j < elem . cpFeatures . size ( ) ; j ++ ) { currentFeatures . add ( elem . cpFeatures . get ( j ) ) ; } } }
|
Start scan features .
|
38,149
|
public String classify ( String cps ) { String modelLabel = "" ; int i ; intCps . clear ( ) ; StringTokenizer strTok = new StringTokenizer ( cps , " \t\r\n" ) ; int count = strTok . countTokens ( ) ; for ( i = 0 ; i < count ; i ++ ) { String cpStr = strTok . nextToken ( ) ; Integer cpInt = ( Integer ) data . cpStr2Int . get ( cpStr ) ; if ( cpInt != null ) { intCps . add ( cpInt ) ; } } Observation obsr = new Observation ( intCps ) ; inference . classify ( obsr ) ; String lbStr = ( String ) data . lbInt2Str . get ( new Integer ( obsr . modelLabel ) ) ; if ( lbStr != null ) { modelLabel = lbStr ; } return modelLabel ; }
|
classify an observation .
|
38,150
|
public List classify ( List data ) { List list = new ArrayList ( ) ; for ( int i = 0 ; i < data . size ( ) ; i ++ ) { list . add ( classify ( ( String ) data . get ( i ) ) ) ; } return list ; }
|
classify a list of observation .
|
38,151
|
public void registerProcessor ( String processor , Priority prio ) { this . processorNames . get ( prio ) . add ( processor ) ; }
|
method to register a processor
|
38,152
|
public void initializeAllProcessors ( UimaContext aContext ) { for ( Priority prio : processorNames . keySet ( ) ) { for ( String pn : processorNames . get ( prio ) ) { try { Class < ? > c = Class . forName ( pn ) ; GenericProcessor p = ( GenericProcessor ) c . newInstance ( ) ; p . initialize ( aContext ) ; processors . get ( prio ) . add ( p ) ; } catch ( Exception exception ) { exception . printStackTrace ( ) ; Logger . printError ( component , "Unable to initialize registered Processor " + pn + ", got: " + exception . toString ( ) ) ; System . exit ( - 1 ) ; } } } this . initialized = true ; }
|
Based on reflection this method instantiates and initializes all of the registered Processors .
|
38,153
|
public void executeProcessors ( JCas jcas , ProcessorManager . Priority prio ) { if ( ! this . initialized ) { Logger . printError ( component , "Unable to execute Processors; initialization was not concluded successfully." ) ; System . exit ( - 1 ) ; } LinkedList < GenericProcessor > myList = processors . get ( prio ) ; for ( GenericProcessor gp : myList ) { try { gp . process ( jcas ) ; } catch ( Exception exception ) { exception . printStackTrace ( ) ; Logger . printError ( component , "Unable to process registered Processor " + gp . getClass ( ) . getName ( ) + ", got: " + exception . toString ( ) ) ; System . exit ( - 1 ) ; } } }
|
Based on reflection this method instantiates and executes all of the registered Processors .
|
38,154
|
public String getTimexValueEB ( ) { if ( Timex3Interval_Type . featOkTst && ( ( Timex3Interval_Type ) jcasType ) . casFeat_TimexValueEB == null ) jcasType . jcas . throwFeatMissing ( "TimexValueEB" , "de.unihd.dbs.uima.types.heideltime.Timex3Interval" ) ; return jcasType . ll_cas . ll_getStringValue ( addr , ( ( Timex3Interval_Type ) jcasType ) . casFeatCode_TimexValueEB ) ; }
|
getter for TimexValueEB - gets
|
38,155
|
public void setTimexValueEB ( String v ) { if ( Timex3Interval_Type . featOkTst && ( ( Timex3Interval_Type ) jcasType ) . casFeat_TimexValueEB == null ) jcasType . jcas . throwFeatMissing ( "TimexValueEB" , "de.unihd.dbs.uima.types.heideltime.Timex3Interval" ) ; jcasType . ll_cas . ll_setStringValue ( addr , ( ( Timex3Interval_Type ) jcasType ) . casFeatCode_TimexValueEB , v ) ; }
|
setter for TimexValueEB - sets
|
38,156
|
public String getTimexValueLE ( ) { if ( Timex3Interval_Type . featOkTst && ( ( Timex3Interval_Type ) jcasType ) . casFeat_TimexValueLE == null ) jcasType . jcas . throwFeatMissing ( "TimexValueLE" , "de.unihd.dbs.uima.types.heideltime.Timex3Interval" ) ; return jcasType . ll_cas . ll_getStringValue ( addr , ( ( Timex3Interval_Type ) jcasType ) . casFeatCode_TimexValueLE ) ; }
|
getter for TimexValueLE - gets
|
38,157
|
public void setTimexValueLE ( String v ) { if ( Timex3Interval_Type . featOkTst && ( ( Timex3Interval_Type ) jcasType ) . casFeat_TimexValueLE == null ) jcasType . jcas . throwFeatMissing ( "TimexValueLE" , "de.unihd.dbs.uima.types.heideltime.Timex3Interval" ) ; jcasType . ll_cas . ll_setStringValue ( addr , ( ( Timex3Interval_Type ) jcasType ) . casFeatCode_TimexValueLE , v ) ; }
|
setter for TimexValueLE - sets
|
38,158
|
public String getTimexValueEE ( ) { if ( Timex3Interval_Type . featOkTst && ( ( Timex3Interval_Type ) jcasType ) . casFeat_TimexValueEE == null ) jcasType . jcas . throwFeatMissing ( "TimexValueEE" , "de.unihd.dbs.uima.types.heideltime.Timex3Interval" ) ; return jcasType . ll_cas . ll_getStringValue ( addr , ( ( Timex3Interval_Type ) jcasType ) . casFeatCode_TimexValueEE ) ; }
|
getter for TimexValueEE - gets
|
38,159
|
public void setTimexValueEE ( String v ) { if ( Timex3Interval_Type . featOkTst && ( ( Timex3Interval_Type ) jcasType ) . casFeat_TimexValueEE == null ) jcasType . jcas . throwFeatMissing ( "TimexValueEE" , "de.unihd.dbs.uima.types.heideltime.Timex3Interval" ) ; jcasType . ll_cas . ll_setStringValue ( addr , ( ( Timex3Interval_Type ) jcasType ) . casFeatCode_TimexValueEE , v ) ; }
|
setter for TimexValueEE - sets
|
38,160
|
public String getTimexValueLB ( ) { if ( Timex3Interval_Type . featOkTst && ( ( Timex3Interval_Type ) jcasType ) . casFeat_TimexValueLB == null ) jcasType . jcas . throwFeatMissing ( "TimexValueLB" , "de.unihd.dbs.uima.types.heideltime.Timex3Interval" ) ; return jcasType . ll_cas . ll_getStringValue ( addr , ( ( Timex3Interval_Type ) jcasType ) . casFeatCode_TimexValueLB ) ; }
|
getter for TimexValueLB - gets
|
38,161
|
public void setTimexValueLB ( String v ) { if ( Timex3Interval_Type . featOkTst && ( ( Timex3Interval_Type ) jcasType ) . casFeat_TimexValueLB == null ) jcasType . jcas . throwFeatMissing ( "TimexValueLB" , "de.unihd.dbs.uima.types.heideltime.Timex3Interval" ) ; jcasType . ll_cas . ll_setStringValue ( addr , ( ( Timex3Interval_Type ) jcasType ) . casFeatCode_TimexValueLB , v ) ; }
|
setter for TimexValueLB - sets
|
38,162
|
public String getEmptyValue ( ) { if ( Timex3Interval_Type . featOkTst && ( ( Timex3Interval_Type ) jcasType ) . casFeat_emptyValue == null ) jcasType . jcas . throwFeatMissing ( "emptyValue" , "de.unihd.dbs.uima.types.heideltime.Timex3Interval" ) ; return jcasType . ll_cas . ll_getStringValue ( addr , ( ( Timex3Interval_Type ) jcasType ) . casFeatCode_emptyValue ) ; }
|
getter for emptyValue - gets
|
38,163
|
public void setEmptyValue ( String v ) { if ( Timex3Interval_Type . featOkTst && ( ( Timex3Interval_Type ) jcasType ) . casFeat_emptyValue == null ) jcasType . jcas . throwFeatMissing ( "emptyValue" , "de.unihd.dbs.uima.types.heideltime.Timex3Interval" ) ; jcasType . ll_cas . ll_setStringValue ( addr , ( ( Timex3Interval_Type ) jcasType ) . casFeatCode_emptyValue , v ) ; }
|
setter for emptyValue - sets
|
38,164
|
public String getBeginTimex ( ) { if ( Timex3Interval_Type . featOkTst && ( ( Timex3Interval_Type ) jcasType ) . casFeat_beginTimex == null ) jcasType . jcas . throwFeatMissing ( "beginTimex" , "de.unihd.dbs.uima.types.heideltime.Timex3Interval" ) ; return jcasType . ll_cas . ll_getStringValue ( addr , ( ( Timex3Interval_Type ) jcasType ) . casFeatCode_beginTimex ) ; }
|
getter for beginTimex - gets
|
38,165
|
public void setBeginTimex ( String v ) { if ( Timex3Interval_Type . featOkTst && ( ( Timex3Interval_Type ) jcasType ) . casFeat_beginTimex == null ) jcasType . jcas . throwFeatMissing ( "beginTimex" , "de.unihd.dbs.uima.types.heideltime.Timex3Interval" ) ; jcasType . ll_cas . ll_setStringValue ( addr , ( ( Timex3Interval_Type ) jcasType ) . casFeatCode_beginTimex , v ) ; }
|
setter for beginTimex - sets
|
38,166
|
public String getEndTimex ( ) { if ( Timex3Interval_Type . featOkTst && ( ( Timex3Interval_Type ) jcasType ) . casFeat_endTimex == null ) jcasType . jcas . throwFeatMissing ( "endTimex" , "de.unihd.dbs.uima.types.heideltime.Timex3Interval" ) ; return jcasType . ll_cas . ll_getStringValue ( addr , ( ( Timex3Interval_Type ) jcasType ) . casFeatCode_endTimex ) ; }
|
getter for endTimex - gets
|
38,167
|
public void setEndTimex ( String v ) { if ( Timex3Interval_Type . featOkTst && ( ( Timex3Interval_Type ) jcasType ) . casFeat_endTimex == null ) jcasType . jcas . throwFeatMissing ( "endTimex" , "de.unihd.dbs.uima.types.heideltime.Timex3Interval" ) ; jcasType . ll_cas . ll_setStringValue ( addr , ( ( Timex3Interval_Type ) jcasType ) . casFeatCode_endTimex , v ) ; }
|
setter for endTimex - sets
|
38,168
|
public void process ( JCas jcas ) { try { tagger . process ( jcas ) ; } catch ( AnalysisEngineProcessException e ) { e . printStackTrace ( ) ; } }
|
invokes the IntervalTagger s process method .
|
38,169
|
public String getTimexId ( ) { if ( Dct_Type . featOkTst && ( ( Dct_Type ) jcasType ) . casFeat_timexId == null ) jcasType . jcas . throwFeatMissing ( "timexId" , "de.unihd.dbs.uima.types.heideltime.Dct" ) ; return jcasType . ll_cas . ll_getStringValue ( addr , ( ( Dct_Type ) jcasType ) . casFeatCode_timexId ) ; }
|
getter for timexId - gets
|
38,170
|
public void setTimexId ( String v ) { if ( Dct_Type . featOkTst && ( ( Dct_Type ) jcasType ) . casFeat_timexId == null ) jcasType . jcas . throwFeatMissing ( "timexId" , "de.unihd.dbs.uima.types.heideltime.Dct" ) ; jcasType . ll_cas . ll_setStringValue ( addr , ( ( Dct_Type ) jcasType ) . casFeatCode_timexId , v ) ; }
|
setter for timexId - sets
|
38,171
|
private static void patternCompile ( ) { try { ptnNumber = Pattern . compile ( strNumberPattern ) ; ptnShortDate = Pattern . compile ( strShortDatePattern ) ; ptnLongDate = Pattern . compile ( strLongDatePattern ) ; ptnPercentage = Pattern . compile ( strPercentagePattern ) ; ptnCurrency = Pattern . compile ( strCurrencyPattern ) ; ptnViCurrency = Pattern . compile ( strViCurrencyPattern ) ; } catch ( PatternSyntaxException ex ) { System . err . println ( ex . getMessage ( ) ) ; System . exit ( 1 ) ; } }
|
Pattern compile .
|
38,172
|
private static String patternMatching ( String ptnName , String input ) { String suffix = "" ; if ( ptnNumber == null ) patternCompile ( ) ; Matcher matcher ; if ( ptnName . equals ( "number" ) ) { matcher = ptnNumber . matcher ( input ) ; if ( matcher . matches ( ) ) suffix = ":number" ; } else if ( ptnName . equals ( "short_date" ) ) { matcher = ptnShortDate . matcher ( input ) ; if ( matcher . matches ( ) ) suffix = ":short-date" ; } else if ( ptnName . equals ( "long_date" ) ) { matcher = ptnLongDate . matcher ( input ) ; if ( matcher . matches ( ) ) suffix = ":long-date" ; } else if ( ptnName . equals ( "percentage" ) ) { matcher = ptnPercentage . matcher ( input ) ; if ( matcher . matches ( ) ) suffix = ":percentage" ; } else if ( ptnName . equals ( "currency" ) ) { matcher = ptnCurrency . matcher ( input ) ; if ( matcher . matches ( ) ) suffix = ":currency" ; else { matcher = ptnViCurrency . matcher ( input ) ; if ( matcher . matches ( ) ) { suffix = ":currency" ; } } } return suffix ; }
|
Pattern matching .
|
38,173
|
public void parseVnSyllable ( String syll ) { strSyllable = syll ; strMainVowel = "" ; strSecondaryVowel = "" ; strFirstConsonant = "" ; strLastConsonant = "" ; iCurPos = 0 ; validViSyll = true ; parseFirstConsonant ( ) ; parseSecondaryVowel ( ) ; parseMainVowel ( ) ; parseLastConsonant ( ) ; }
|
Parses the vn syllable .
|
38,174
|
private void parseFirstConsonant ( ) { Iterator iter = alFirstConsonants . iterator ( ) ; while ( iter . hasNext ( ) ) { String strFirstCon = ( String ) iter . next ( ) ; if ( strSyllable . startsWith ( strFirstCon , iCurPos ) ) { strFirstConsonant = strFirstCon ; iCurPos += strFirstCon . length ( ) ; return ; } } strFirstConsonant = ZERO ; }
|
Parses the first consonant .
|
38,175
|
private void parseSecondaryVowel ( ) { if ( ! validViSyll ) return ; char curChar , nextChar ; if ( iCurPos > strSyllable . length ( ) - 1 ) { validViSyll = false ; return ; } curChar = strSyllable . charAt ( iCurPos ) ; if ( iCurPos == strSyllable . length ( ) - 1 ) nextChar = '$' ; else nextChar = strSyllable . charAt ( iCurPos + 1 ) ; TONE tone = TONE . NO_TONE ; int idx1 = vnVowels . indexOf ( curChar ) ; int idx2 = vnVowels . indexOf ( nextChar ) ; if ( idx1 == - 1 ) return ; tone = TONE . getTone ( idx1 % 6 ) ; curChar = vnVowels . charAt ( ( idx1 / 6 ) * 6 ) ; if ( idx2 == - 1 ) { strSecondaryVowel = ZERO ; return ; } nextChar = vnVowels . charAt ( ( idx2 / 6 ) * 6 ) ; if ( tone . getValue ( ) == TONE . NO_TONE . getValue ( ) ) tone = TONE . getTone ( idx2 % 6 ) ; if ( curChar == 'o' ) { if ( nextChar == 'a' || nextChar == 'e' ) { strSecondaryVowel += curChar ; iCurPos ++ ; } else strSecondaryVowel = ZERO ; return ; } else if ( curChar == 'u' ) { if ( nextChar != 'i' && nextChar != '$' ) { strSecondaryVowel += curChar ; iCurPos ++ ; } else strSecondaryVowel = ZERO ; return ; } }
|
Parses the secondary vowel .
|
38,176
|
private void parseMainVowel ( ) { if ( ! validViSyll ) return ; if ( iCurPos > strSyllable . length ( ) - 1 ) { validViSyll = false ; return ; } String strVowel = "" ; for ( int i = iCurPos ; i < strSyllable . length ( ) ; ++ i ) { int idx = vnVowels . indexOf ( strSyllable . charAt ( i ) ) ; if ( idx == - 1 ) break ; strVowel += vnVowels . charAt ( ( idx / 6 ) * 6 ) ; if ( tone . getValue ( ) == TONE . NO_TONE . getValue ( ) ) tone = TONE . getTone ( idx % 6 ) ; } Iterator iter = alMainVowels . iterator ( ) ; while ( iter . hasNext ( ) ) { String tempVowel = ( String ) iter . next ( ) ; if ( strVowel . startsWith ( tempVowel ) ) { strMainVowel = tempVowel ; iCurPos += tempVowel . length ( ) ; return ; } } validViSyll = false ; return ; }
|
Parses the main vowel .
|
38,177
|
private void parseLastConsonant ( ) { if ( ! validViSyll ) return ; if ( iCurPos > strSyllable . length ( ) ) strLastConsonant = ZERO ; String strCon = strSyllable . substring ( iCurPos , strSyllable . length ( ) ) ; if ( strCon . length ( ) > 3 ) { validViSyll = false ; return ; } Iterator iter = alLastConsonants . iterator ( ) ; while ( iter . hasNext ( ) ) { String tempLastCon = ( String ) iter . next ( ) ; if ( strCon . equals ( tempLastCon ) ) { strLastConsonant = tempLastCon ; iCurPos += strLastConsonant . length ( ) ; return ; } } strLastConsonant = ZERO ; if ( iCurPos >= strSyllable . length ( ) ) validViSyll = true ; else validViSyll = false ; return ; }
|
Parses the last consonant .
|
38,178
|
private static void initArrayList ( ArrayList al , String str ) { StringTokenizer strTknr = new StringTokenizer ( str , "|" ) ; while ( strTknr . hasMoreTokens ( ) ) { al . add ( strTknr . nextToken ( ) ) ; } }
|
Inits the array list .
|
38,179
|
public static void displayCopyright ( ) { System . out . println ( "Vietnamese Word Segmentation:" ) ; System . out . println ( "\tusing Conditional Random Fields" ) ; System . out . println ( "\ttesting our dataset of 8000 sentences with the highest F1-measure of 94%" ) ; System . out . println ( "Copyright (C) by Cam-Tu Nguyen {1,2} and Xuan-Hieu Phan {2}" ) ; System . out . println ( "{1}: College of Technology, Hanoi National University" ) ; System . out . println ( "{2}: Graduate School of Information Sciences, Tohoku University" ) ; System . out . println ( "Email: {ncamtu@gmail.com ; pxhieu@gmail.com}" ) ; System . out . println ( ) ; }
|
Display copyright .
|
38,180
|
public static void displayHelp ( ) { System . out . println ( "Usage:" ) ; System . out . println ( "\tCase 1: WordSegmenting -modeldir <model directory> -inputfile <input data file>" ) ; System . out . println ( "\tCase 2: WordSegmenting -modeldir <model directory> -inputdir <input data directory>" ) ; System . out . println ( "Where:" ) ; System . out . println ( "\t<model directory> is the directory contain the model and option files" ) ; System . out . println ( "\t<input data file> is the file containing input sentences that need to" ) ; System . out . println ( "\tbe tagged (each sentence on a line)" ) ; System . out . println ( "\t<input data directory> is the directory containing multiple input data files (.tkn)" ) ; System . out . println ( ) ; }
|
Display help .
|
38,181
|
public static Boolean checkInfrontBehind ( MatchResult r , Sentence s ) { Boolean ok = true ; if ( r . start ( ) > 1 ) { if ( ( s . getCoveredText ( ) . substring ( r . start ( ) - 2 , r . start ( ) ) . matches ( "\\d\\." ) ) ) { ok = false ; } } if ( r . start ( ) > 0 ) { if ( ( ( s . getCoveredText ( ) . substring ( r . start ( ) - 1 , r . start ( ) ) . matches ( "[\\w\\$\\+]" ) ) ) && ( ! ( s . getCoveredText ( ) . substring ( r . start ( ) - 1 , r . start ( ) ) . matches ( "\\(" ) ) ) ) { ok = false ; } } if ( r . end ( ) < s . getCoveredText ( ) . length ( ) ) { if ( ( s . getCoveredText ( ) . substring ( r . end ( ) , r . end ( ) + 1 ) . matches ( "[°\\w]") ) & ( ! ( s . getCoveredText ( ) . substring ( r . end ( ) , r . end ( ) + 1 ) . matches ( "\\)" ) ) ) ) { ok = false ; } if ( r . end ( ) + 1 < s . getCoveredText ( ) . length ( ) ) { if ( s . getCoveredText ( ) . substring ( r . end ( ) , r . end ( ) + 2 ) . matches ( "[\\.,]\\d" ) ) { ok = false ; } } } return ok ; }
|
Check token boundaries of expressions .
|
38,182
|
public void inferenceAll ( List data ) { System . out . println ( "Starting inference ..." ) ; long start , stop , elapsed ; start = System . currentTimeMillis ( ) ; for ( int i = 0 ; i < data . size ( ) ; i ++ ) { System . out . println ( "sequence " + Integer . toString ( i + 1 ) ) ; List seq = ( List ) data . get ( i ) ; inference ( seq ) ; } stop = System . currentTimeMillis ( ) ; elapsed = stop - start ; System . out . println ( "Inference " + Integer . toString ( data . size ( ) ) + " sequences completed!" ) ; System . out . println ( "Inference time: " + Double . toString ( ( double ) elapsed / 1000 ) + " seconds" ) ; }
|
Inference all .
|
38,183
|
public void readCpMaps ( BufferedReader fin ) throws IOException { if ( cpStr2Int != null ) { cpStr2Int . clear ( ) ; } else { cpStr2Int = new HashMap ( ) ; } if ( cpInt2Str != null ) { cpInt2Str . clear ( ) ; } else { cpInt2Str = new HashMap ( ) ; } String line ; if ( ( line = fin . readLine ( ) ) == null ) { System . out . println ( "No context predicate map size information" ) ; return ; } int numCps = Integer . parseInt ( line ) ; if ( numCps <= 0 ) { System . out . println ( "Invalid mapping size" ) ; return ; } System . out . println ( "Reading the context predicate maps ..." ) ; for ( int i = 0 ; i < numCps ; i ++ ) { line = fin . readLine ( ) ; if ( line == null ) { System . out . println ( "Invalid context predicate mapping line" ) ; return ; } StringTokenizer strTok = new StringTokenizer ( line , " \t\r\n" ) ; if ( strTok . countTokens ( ) != 2 ) { continue ; } String cpStr = strTok . nextToken ( ) ; String cpInt = strTok . nextToken ( ) ; cpStr2Int . put ( cpStr , new Integer ( cpInt ) ) ; cpInt2Str . put ( new Integer ( cpInt ) , cpStr ) ; } System . out . println ( "Reading context predicate maps (" + Integer . toString ( cpStr2Int . size ( ) ) + " entries) completed!" ) ; line = fin . readLine ( ) ; }
|
Read cp maps .
|
38,184
|
public void readLbMaps ( BufferedReader fin ) throws IOException { if ( lbStr2Int != null ) { lbStr2Int . clear ( ) ; } else { lbStr2Int = new HashMap ( ) ; } if ( lbInt2Str != null ) { lbInt2Str . clear ( ) ; } else { lbInt2Str = new HashMap ( ) ; } String line ; if ( ( line = fin . readLine ( ) ) == null ) { System . out . println ( "No label map size information" ) ; return ; } int numLabels = Integer . parseInt ( line ) ; if ( numLabels <= 0 ) { System . out . println ( "Invalid label map size" ) ; return ; } System . out . println ( "Reading label maps ..." ) ; for ( int i = 0 ; i < numLabels ; i ++ ) { line = fin . readLine ( ) ; if ( line == null ) { System . out . println ( "Invalid label map line" ) ; return ; } StringTokenizer strTok = new StringTokenizer ( line , " \t\r\n" ) ; if ( strTok . countTokens ( ) != 2 ) { continue ; } String lbStr = strTok . nextToken ( ) ; String lbInt = strTok . nextToken ( ) ; lbStr2Int . put ( lbStr , new Integer ( lbInt ) ) ; lbInt2Str . put ( new Integer ( lbInt ) , lbStr ) ; } System . out . println ( "Reading label maps (" + Integer . toString ( lbStr2Int . size ( ) ) + " entries) completed!" ) ; line = fin . readLine ( ) ; }
|
Read lb maps .
|
38,185
|
public int getSentenceId ( ) { if ( Sentence_Type . featOkTst && ( ( Sentence_Type ) jcasType ) . casFeat_sentenceId == null ) jcasType . jcas . throwFeatMissing ( "sentenceId" , "de.unihd.dbs.uima.types.heideltime.Sentence" ) ; return jcasType . ll_cas . ll_getIntValue ( addr , ( ( Sentence_Type ) jcasType ) . casFeatCode_sentenceId ) ; }
|
getter for sentenceId - gets
|
38,186
|
public void setSentenceId ( int v ) { if ( Sentence_Type . featOkTst && ( ( Sentence_Type ) jcasType ) . casFeat_sentenceId == null ) jcasType . jcas . throwFeatMissing ( "sentenceId" , "de.unihd.dbs.uima.types.heideltime.Sentence" ) ; jcasType . ll_cas . ll_setIntValue ( addr , ( ( Sentence_Type ) jcasType ) . casFeatCode_sentenceId , v ) ; }
|
setter for sentenceId - sets
|
38,187
|
public String getUri ( ) { if ( SourceDocInfo_Type . featOkTst && ( ( SourceDocInfo_Type ) jcasType ) . casFeat_uri == null ) jcasType . jcas . throwFeatMissing ( "uri" , "de.unihd.dbs.uima.types.heideltime.SourceDocInfo" ) ; return jcasType . ll_cas . ll_getStringValue ( addr , ( ( SourceDocInfo_Type ) jcasType ) . casFeatCode_uri ) ; }
|
getter for uri - gets
|
38,188
|
public int getOffsetInSource ( ) { if ( SourceDocInfo_Type . featOkTst && ( ( SourceDocInfo_Type ) jcasType ) . casFeat_offsetInSource == null ) jcasType . jcas . throwFeatMissing ( "offsetInSource" , "de.unihd.dbs.uima.types.heideltime.SourceDocInfo" ) ; return jcasType . ll_cas . ll_getIntValue ( addr , ( ( SourceDocInfo_Type ) jcasType ) . casFeatCode_offsetInSource ) ; }
|
getter for offsetInSource - gets
|
38,189
|
public void setOffsetInSource ( int v ) { if ( SourceDocInfo_Type . featOkTst && ( ( SourceDocInfo_Type ) jcasType ) . casFeat_offsetInSource == null ) jcasType . jcas . throwFeatMissing ( "offsetInSource" , "de.unihd.dbs.uima.types.heideltime.SourceDocInfo" ) ; jcasType . ll_cas . ll_setIntValue ( addr , ( ( SourceDocInfo_Type ) jcasType ) . casFeatCode_offsetInSource , v ) ; }
|
setter for offsetInSource - sets
|
38,190
|
@ SuppressWarnings ( { "unchecked" , "rawtypes" } ) public static void setProps ( Properties prop ) { properties = prop ; Iterator propIt = properties . entrySet ( ) . iterator ( ) ; while ( propIt . hasNext ( ) ) { Entry < String , String > entry = ( Entry < String , String > ) propIt . next ( ) ; properties . setProperty ( entry . getKey ( ) , entry . getValue ( ) . trim ( ) ) ; } }
|
Sets properties once
|
38,191
|
public boolean initSenSegmenter ( String modelDir ) { System . out . println ( "Initilize JVnSenSegmenter ..." ) ; vnSenSegmenter = new JVnSenSegmenter ( ) ; if ( ! vnSenSegmenter . init ( modelDir ) ) { System . out . println ( "Error while initilizing JVnSenSegmenter" ) ; vnSenSegmenter = null ; return false ; } return true ; }
|
Initialize the sentence segmentation for Vietnamese ; return true if the initialization is successful and false otherwise .
|
38,192
|
public boolean initSegmenter ( String modelDir ) { System . out . println ( "Initilize JVnSegmenter ..." ) ; System . out . println ( modelDir ) ; vnSegmenter = new CRFSegmenter ( ) ; try { vnSegmenter . init ( modelDir ) ; } catch ( Exception e ) { System . out . println ( "Error while initializing JVnSegmenter" ) ; vnSegmenter = null ; return false ; } return true ; }
|
Initialize the word segmentation for Vietnamese .
|
38,193
|
public boolean initPosTagger ( String modelDir ) { try { this . vnPosTagger = new MaxentTagger ( modelDir ) ; } catch ( Exception e ) { System . out . println ( "Error while initializing POS TAgger" ) ; vnPosTagger = null ; return false ; } return true ; }
|
Initialize the pos tagger for Vietnamese .
|
38,194
|
public String senSegment ( String text ) { String ret = text ; if ( vnSenSegmenter != null ) { ret = vnSenSegmenter . senSegment ( text ) ; } return ret . trim ( ) ; }
|
Do sentence segmentation .
|
38,195
|
public String senTokenize ( String text ) { String ret = text ; if ( isTokenization ) { ret = PennTokenizer . tokenize ( text ) ; } return ret . trim ( ) ; }
|
Do sentence tokenization .
|
38,196
|
public String wordSegment ( String text ) { String ret = text ; if ( vnSegmenter == null ) return ret ; ret = vnSegmenter . segmenting ( ret ) ; return ret ; }
|
Do word segmentation .
|
38,197
|
public String posTagging ( String text ) { String ret = text ; if ( vnPosTagger != null ) { ret = vnPosTagger . tagging ( text ) ; } return ret ; }
|
Do pos tagging .
|
38,198
|
public static WayPoint of ( final Latitude latitude , final Longitude longitude , final Length elevation , final Speed speed , final ZonedDateTime time , final Degrees magneticVariation , final Length geoidHeight , final String name , final String comment , final String description , final String source , final List < Link > links , final String symbol , final String type , final Fix fix , final UInt sat , final Double hdop , final Double vdop , final Double pdop , final Duration ageOfGPSData , final DGPSStation dgpsID ) { return new WayPoint ( latitude , longitude , elevation , speed , time , magneticVariation , geoidHeight , name , comment , description , source , links , symbol , type , fix , sat , hdop , vdop , pdop , ageOfGPSData , dgpsID , null ) ; }
|
Create a new way - point with the given parameter .
|
38,199
|
public static Location of ( final Point point ) { requireNonNull ( point ) ; return of ( point . getLatitude ( ) , point . getLongitude ( ) , point . getElevation ( ) . orElse ( null ) ) ; }
|
Create a new location from the given GPS point .
|