text
stringlengths
22
301k
# Extract href-URL fragments from anchor tags and append the full URL
# to urls.txt.  (Line breaks restored: in the collapsed source the
# "### (n)" element markers commented out the rest of the rule.)
@NODES _LINE

@POST
  "urls.txt" << "https://state.1keydata.com/" << N("$text",4) << "\n";
  single();

@RULES
_xNIL <-
  href ### (1)
  \= ### (2)
  \" ### (3)
  _xWILD [fail=(\")] ### (4)
  \" ### (5)
  @@
# Ensure a top-level "icd_codes" concept exists in the knowledge base,
# creating it under the KB root if it is missing.
@CODE
G("icd_codes") = getconcept(findroot(),"icd_codes");
if (!G("icd_codes"))
{
  G("icd_codes") = makeconcept(findroot(),"icd_codes");
}
@@CODE
# For each _entry under a _split node, copy its code/term and add it to
# the hierarchy only when the code contains no dot (i.e. a category-level
# code).  (Line breaks restored: "### (1)" had commented out the "@@".)
@NODES _split

@POST
  L("code") = N("code", 1);
  L("term") = N("term", 1);
  # Only top-level codes (no "." subdivision) are added to the hierarchy.
  if (!strcontains(".", L("code")))
  {
    addEntryToHier(X("con"), L("code"), L("term"));
  }
  noop();

@RULES
_xNIL <-
  _entry ### (1)
  @@
# Mark each tab character inside a _LINE as a _sep separator node.
@NODES _LINE

@RULES
_sep <- \t @@
# Group runs of tokens into _sentence nodes, terminated either by an
# explicit end-of-sentence / blank-line node (first rule, which excises
# the terminator) or by a newline following an _init token (second rule).
# Sentences are not built inside _item contexts.
# Fixes: line breaks restored ("### (n)" markers had commented out the
# rule tails), and the duplicated "_endSent" removed from the second
# rule's fail list.
@MULTI _section _sentence _looseText

@CHECK
  if (pnname(X()) == "_item")
  {
    fail();
  }

@POST
  excise(2,2);
  single();

@RULES
_sentence <-
  _xWILD [fail=(_section _subsection _endSent _BLANKLINE)] ### (1)
  _xWILD [one trig match=(_endSent _BLANKLINE)] ### (2)
  @@

@CHECK
  if (pnname(X()) == "_item")
  {
    fail();
  }

@RULES
_sentence <-
  _xWILD [fail=(_endSent _sentence _section _subsection _BLANKLINE)] ### (1)
  _init [trig]
  _xWILD [one matches=(\n \r)]
  @@
# Build _POSTS and _CHECKS regions from _soPOST/_soCHECK markers,
# recursively converting _LIT nodes to _ACTION via the littoaction
# minipass.
# NOTE(review): the littoaction minipass is defined twice, identically,
# in the original; the duplication is preserved as-is — confirm upstream
# whether the second definition is intentional.
@NODES _NLPPP

@RECURSE littoaction

@POST
  rfalitttoaction(1)
  single()

@RULES
_ACTION <- _LIT @@

@@RECURSE littoaction

@RECURSE littoaction

@POST
  rfalitttoaction(1)
  single()

@RULES
_ACTION <- _LIT @@

@@RECURSE littoaction

@POST
  rfaactions(2)
  single()

@RULES
_POSTS [base] <-
  _soPOST
  _xWILD [match=(_LIT _ACTION) recurse=(littoaction)]
  _eoPOST [opt]
  @@

_CHECKS [base] <-
  _soCHECK
  _xWILD [match=(_LIT _ACTION) recurse=(littoaction)]
  _eoCHECK [opt]
  @@
# This is the tagrules minipass
@RECURSE tagrules

# This rule in the minipass operates on the phrase of nodes that
# matched _xWILD in the _tag rule.
@RULES
_keywords <-
  keywords
  \=
  \(
  _xALPHA [star]
  \)
  @@

@@RECURSE tagrules

# This is the main rules area, called the Grammar Zone.
# When this rule matches, it invokes a minipass to process
# the nodes that matched the _xWILD wildcard.
@RULES
_tag <-
  \<
  _xWILD [nest=(tagrules)]
  \>
  @@
# Recognize a capitalized common female first name as _femaleName
# (layered as _humanNamepart).  (Line breaks restored: the "# Ex:"
# comment had commented out the entire rule.)
@NODES _LINE

@PRE
<1,1> cap();

@RULES

# Ex: Alice
_femaleName [layer=(_humanNamepart )] <-
  _xWILD [min=1 max=1 s match=("Alice" "Patricia" "Linda" "Barbara" "Elizabeth"
    "Jennifer" "Maria" "Susan" "Margaret" "Dorothy" "Lisa" "Nancy" "Karen"
    "Betty" "Helen" "Sandra" "Donna" "Carol" "Ruth" "Sharon" "Michelle"
    "Laura" "Sarah" "Kimberly" "Deborah" "Jessica" "Shirley" "Cynthia"
    "Angela" "Melissa" "Brenda" "Amy" "Anna" "Rebecca" "Virginia" "Kathleen"
    "Pamela" "Martha" "Debra" "Amanda" "Stephanie" "Carolyn" "Christine"
    "Marie" "Janet" "Catherine" "Frances" "Ann" "Joyce" "Diane" "Mary"
    "Julie" "Heather" "Teresa" "Doris" "Gloria" "Evelyn" "Jean" "Cheryl"
    "Mildred" "Katherine" "Joan" "Ashley" "Judith" "Rose" "Janice" "Kelly"
    "Nicole" "Judy" "Christina" "Kathy" "Theresa" "Beverly" "Denise" "Tammy"
    "Irene" "Jane" "Lori" "Rachel" "Marilyn" "Andrea" "Kathryn" "Louise"
    "Sara" "Anne" "Jacqueline" "Wanda" "Bonnie" "Julia" "Ruby" "Lois" "Tina"
    "Phyllis" "Norma" "Paula" "Diana" "Annie" "Lillian" "Emily" "Robin")]
  @@
# Recognize postal unit + room designations (e.g. "Rm. # A-12", "Rm 5",
# "# B") as _unitRoom, layering the unit word as _unit and the room
# designator as _room.  (Line breaks restored: "# Ex:" comments had
# commented out the rules; one rule was also split across two physical
# lines mid-element.)
@NODES _LINE

@PRE
<1,1> cap();
<6,6> cap();
<6,6> length(1);

@POST
  group(6, 8, "_room");
  single();

@RULES

# Ex: Rm.\_#\_A
_unitRoom <-
  _PostalUnit [s layer=("_unit")]
  \. [s]
  _xWHITE [star s]
  \# [s]
  _xWHITE [star s]
  _xALPHA [s]
  \- [s]
  _xNUM [s]
  @@

@PRE
<1,1> cap();

@RULES

# Ex: Rm.\_#\_
_unitRoom <-
  _PostalUnit [s layer=("_unit")]
  \. [s]
  _xWHITE [star s]
  \# [s]
  _xWHITE [star s]
  _xNUM [s layer=("_room")]
  @@

@PRE
<1,1> cap();
<6,6> cap();
<6,6> length(1);

@RULES

# Ex: Rm.\_#\_B
_unitRoom <-
  _PostalUnit [s layer=("_unit")]
  \. [s]
  _xWHITE [star s]
  \# [s]
  _xWHITE [star s]
  _xALPHA [s layer=("_room")]
  @@

@PRE
<1,1> cap();
<5,5> cap();
<5,5> length(1);

@POST
  group(5, 7, "_room");
  single();

@RULES

# Ex: Rm\_#\_A
_unitRoom <-
  _PostalUnit [s layer=("_unit")]
  _xWHITE [star s]
  \# [s]
  _xWHITE [star s]
  _xALPHA [s]
  \- [s]
  _xNUM [s]
  @@

@PRE
<1,1> cap();

@RULES

# Ex: Rm\_#\_
_unitRoom <-
  _PostalUnit [s layer=("_unit")]
  _xWHITE [star s]
  \# [s]
  _xWHITE [star s]
  _xNUM [s layer=("_room")]
  @@

@PRE
<1,1> cap();
<5,5> cap();
<5,5> length(1);

@RULES

# Ex: Rm\_#\_B
_unitRoom <-
  _PostalUnit [s layer=("_unit")]
  _xWHITE [star s]
  \# [s]
  _xWHITE [star s]
  _xALPHA [s layer=("_room")]
  @@

@PRE
<1,1> cap();
<4,4> cap();
<4,4> length(1);

@POST
  group(4, 6, "_room");
  single();

@RULES

# Ex: Rm.\_A
_unitRoom <-
  _PostalUnit [s layer=("_unit")]
  \. [s]
  _xWHITE [star s]
  _xALPHA [s]
  \- [s]
  _xNUM [s]
  @@

@PRE
<1,1> cap();

@RULES

# Ex: Rm.\_
_unitRoom <-
  _PostalUnit [s layer=("_unit")]
  \. [s]
  _xWHITE [star s]
  _xNUM [s layer=("_room")]
  @@

@PRE
<1,1> cap();
<4,4> cap();
<4,4> length(1);

@RULES

# Ex: Rm.\_B
_unitRoom <-
  _PostalUnit [s layer=("_unit")]
  \. [s]
  _xWHITE [star s]
  _xALPHA [s layer=("_room")]
  @@

@PRE
<1,1> cap();
<3,3> cap();
<3,3> length(1);

@POST
  group(3, 5, "_room");
  single();

@RULES

# Ex: Rm\_A
_unitRoom <-
  _PostalUnit [s layer=("_unit")]
  _xWHITE [star s]
  _xALPHA [s]
  \- [s]
  _xNUM [s]
  @@

@PRE
<1,1> cap();

@RULES
_unitRoom <-
  _PostalUnit [s layer=("_unit")]
  _xWHITE [star s]
  _xNUM [s layer=("_room")]
  @@

@PRE
<3,3> cap();
<3,3> length(1);

@RULES

# Ex: Rm\_B
_unitRoom <-
  _PostalUnit [s layer=("_unit")]
  _xWHITE [star s]
  _xALPHA [s layer=("_room")]
  @@

@PRE
<3,3> cap();
<3,3> length(1);

@POST
  group(3, 5, "_room");
  single();

@RULES
_unitRoom <-
  \# [s]
  _xWHITE [star s]
  _xALPHA [s]
  \- [s]
  _xNUM [s]
  @@

@RULES
_unitRoom <-
  \# [s]
  _xWHITE [star s]
  _xNUM [s layer=("_room")]
  @@

@PRE
<3,3> cap();
<3,3> length(1);

@RULES

# Ex: #\_B
_unitRoom <-
  \# [s]
  _xWHITE [star s]
  _xALPHA [s layer=("_room")]
  @@
# Flatten the tree: replace each _LINE node under the root with its
# children (splice the line's contents up one level).
@NODES _ROOT

@POST
  splice(1,1)

@RULES
_xNIL <- _LINE @@
# Reset the "words" concept in the KB and create a child concept named
# after the current input file's head name.
@CODE
G("words") = findconcept(findroot(),"words");
if (!G("words"))
  G("words") = makeconcept(findroot(),"words");
rmchildren(G("words"));
G("word") = makeconcept(G("words"),G("$inputhead"));
@@CODE
# Recognize an _addendum at the start of the text: an uppercase word
# followed by a wildcard and at least 4 underscore tokens.
# (Line breaks restored: "### (1)" had commented out the "@@".)
@NODES _ROOT

@PRE
<2,2> uppercase();

@RULES
_addendum <-
  _xSTART
  _xALPHA
  _xWILD
  _xWILD [min=4 matches=(_)] ### (1)
  @@
# Group a _language node plus its following header zones into a
# _languageZone, propagating the "language" attribute.
# (Line breaks restored: "### (n)" markers had commented out the tail.)
@NODES _LINE

@POST
  S("language") = N("language",1);
  single();

@RULES
_languageZone <-
  _language ### (1)
  _xWILD [plus match=(_headerZone)] ### (2)
  @@
# Recognize a numeric month token (1-12, with or without leading zero)
# as _monthNum.  (Line breaks restored: the "# Ex:" comment had
# commented out the rule.)
@NODES _LINE

@RULES

# Ex: 02
_monthNum <-
  _xWILD [min=1 max=1 s match=("02" "2" "3" "4" "5" "6" "7" "8" "9" "01"
    "1" "03" "04" "05" "06" "07" "08" "09" "10" "11" "12")]
  @@
# Build a _split node from a "top"/"rare" marker, a punctuation token,
# the literal token 50, and the run of tokens up to the next marker or
# end of input; stores the marker's text and excises the 3-token prefix.
# NOTE(review): the bare "50" element matches a literal numeric token —
# confirm against the input format that this is intended.
@NODES _ROOT

@POST
  S("split") = N("$text", 1);
  excise(1,3);
  single();

@RULES
_split <-
  _xWILD [one matches=(top rare)]
  _xPUNCT
  50
  _xWILD [plus fails=(top rare _xEND)]
  @@
# Recognize specific capitalized company names as _company.
# (Line breaks restored: "# Ex:" comments had commented out the rules.)
@NODES _LINE

@PRE
<1,1> cap();
<3,3> cap();
<5,5> cap();

@RULES

# Ex: Absolute\_Real\_Time,\_Inc.
_company <-
  Absolute [s]
  _xWHITE [star s]
  Real [s]
  _xWHITE [star s]
  Time [s]
  \, [s]
  _xWHITE [star s]
  _companyRoot [s]
  @@

@PRE
<1,1> cap();
<3,3> cap();

@RULES

# Ex: General\_Instrument
_company <-
  General [s]
  _xWHITE [star s]
  Instrument [s]
  @@
# Ensure a fresh, empty "RadLex" concept exists under the KB root,
# creating it if absent and clearing any existing children.
@CODE
G("RadLex") = findconcept(findroot(),"RadLex");
if (!G("RadLex"))
  G("RadLex") = makeconcept(findroot(),"RadLex");
rmchildren(G("RadLex"));
@@CODE
# Inside header zones, group consecutive list/heading groups into a
# _catGroup.  (Line breaks restored: the "### (2)" marker had commented
# out the "@@"; the marker is also corrected to (1) for the sole
# element.)
@PATH _ROOT _headerZone

@RULES
_catGroup <-
  _xWILD [plus match=(_liGroup _hiGroup)] ### (1)
  @@
# Remove everything under the root that is not a _term node.
# (Line breaks restored: "### (1)" had commented out the "@@".)
@NODES _ROOT

@POST
  excise(1,1);

@RULES
_xNIL <-
  _xWILD [fail=(_term)] ### (1)
  @@
# Check if a batch analysis process has just begun
@CODE
G("startflag") = batchstart();
@@CODE
# NOTE(review): this @DECL region appears to have lost its original line
# breaks during extraction — '#' comments run into subsequent code on the
# same physical line and would comment that code out if compiled as-is.
# Restore the upstream line breaks before use; the text below is kept
# byte-identical pending that fix.
# Functions in this span: lookupalphadicttokz (dictionary lookup with the
# DICTTOKZ tokenizer, assigns pos/stem/sem vars on a word node),
# lookupalpha (same lookup driven by the KB "pos" attribute list), and
# the start of vgagree (verb-group agreement; continues on the next span).
@DECL ######## # FUNC: LOOKUPALPHADICTTOKZ # SUBJ: Look up an alphabetic word (with DICTTOKZ tokenize pass). # INPUT: Assumes there's a word-node to attach results to. # OUTPUT: # WARN: Modifies vars in the given node. # ERROR: DICTTOKZ neglects to put the "POS" attribute on # nodes. # [BUG, FEATURE, OMISSION, OVERSIGHT] # ######## lookupalphadicttokz( L("text"), # Lowercased text for word. L("node") # The node representing the word. ) { if (!(L("wordcon") = dictfindword(L("text")))) return; # Not handling unknown words here. pnreplaceval(L("node"),"wordcon", L("wordcon")); # Grab some attributes from kb. # if (L("nsem") = conval(L("wordcon"),"nounsem")) # 06/24/02 AM. { pnreplaceval(L("node"),"nounsem",L("nsem")); # 06/24/02 AM. if (inhierarchy(L("nsem"),"event")) # 06/26/02 AM. pnreplaceval(L("node"),"eventive",1); # 06/24/02 AM. pnreplaceval(L("node"),"sem",conceptname(L("nsem"))); } #domobject(L("text"),L("node"),L("nsem")); L("pos num") = pnvar(L("node"),"pos num"); if (L("pos num") > 0 && !pnvar(L("node"),"stem")) # [MOVED_UP] # 12/29/20 AM. { if (L("stem") = nvstem(L("text"))) pnreplaceval(L("node"),"stem",L("stem")); else pnreplaceval(L("node"),"stem",L("text")); } #L("pos") = 0; if (L("pos num") == 1) { if (pnvar(L("node"),"noun")) { pnreplaceval(L("node"),"pos","_noun"); return; } if (pnvar(L("node"),"verb")) { pnreplaceval(L("node"),"pos","_verb"); # Any -ing verb can be an eventive noun. if (strendswith(L("text"),"ing")) pnreplaceval(L("node"),"eventive",1); return; } ####### [REWORK] # # TODO: NORMALIZE THESE IN THE KB DICTIONARY AND IN ANALYZERS. 
if (pnvar(L("node"),"adj")) { pnreplaceval(L("node"),"pos","_adj"); chpos(L("node"),"JJ"); return; } if (pnvar(L("node"),"adv")) { pnreplaceval(L("node"),"pos","_adv"); chpos(L("node"),"RB"); return; } if (pnvar(L("node"),"prep")) { pnreplaceval(L("node"),"pos","_prep"); chpos(L("node"),"IN"); return; } if (pnvar(L("node"),"pro")) { pnreplaceval(L("node"),"pos","_pro"); chpos(L("node"),"PRP"); return; } if (pnvar(L("node"),"conj")) { pnreplaceval(L("node"),"pos","_conj"); chpos(L("node"),"CC"); return; } if (pnvar(L("node"),"det")) { pnreplaceval(L("node"),"pos","_det"); chpos(L("node"),"DT"); return; } if (pnvar(L("node"),"interj")) { pnreplaceval(L("node"),"pos","_interj"); chpos(L("node"),"UH"); return; } # TODO: SHORTEN THESE IN KB DICT. if (pnvar(L("node"),"adjective")) { pnreplaceval(L("node"),"pos","_adj"); chpos(L("node"),"JJ"); return; } if (pnvar(L("node"),"adverb")) { pnreplaceval(L("node"),"pos","_adv"); chpos(L("node"),"RB"); return; } if (pnvar(L("node"),"pronoun")) { pnreplaceval(L("node"),"pos","_pro"); chpos(L("node"),"PRP"); return; } if (pnvar(L("node"),"conjunction")) { pnreplaceval(L("node"),"pos","_conj"); # FIX. # 12/29/20 AM. chpos(L("node"),"CC"); return; } if (pnvar(L("node"),"interjection")) { pnreplaceval(L("node"),"pos","_interj"); chpos(L("node"),"UH"); return; } ####### [REWORK] # # else # { # Hmm. Found "abbreviation" as one unhandled # "misc.txt" << "abbreviation=" << L("text") << "\n"; # pnreplaceval(L("node"),"abbr",1); # L("pos") = 0; # } } } ######## # FUNC: LOOKUPALPHA # SUBJ: Look up an alphabetic word. # INPUT: Assumes there's a word-node to attach results to. # OUTPUT: # WARN: Modifies vars in the given node. # ERROR: DICTTOKZ neglects to put the "POS" attribute on ######## lookupalpha( L("text"), # Lowercased text for word. L("node") # The node representing the word. ) { if (!(L("wordcon") = dictfindword(L("text")))) return; # Not handling unknown words here. 
pnreplaceval(L("node"),"wordcon", L("wordcon")); # Grab some attributes from kb. # if (L("nsem") = conval(L("wordcon"),"nounsem")) # 06/24/02 AM. { pnreplaceval(L("node"),"nounsem",L("nsem")); # 06/24/02 AM. if (inhierarchy(L("nsem"),"event")) # 06/26/02 AM. pnreplaceval(L("node"),"eventive",1); # 06/24/02 AM. pnreplaceval(L("node"),"sem",conceptname(L("nsem"))); } #domobject(L("text"),L("node"),L("nsem")); L("vals") = findvals(L("wordcon"), "pos"); L("pos num") = 0; L("pos") = 0; while (L("vals")) { L("val") = getstrval(L("vals")); if (L("val") == "noun") { pnreplaceval(L("node"),"noun", 1); L("pos") = "_noun"; # eventivenoun(L("text"),L("node")); } else if (L("val") == "verb") { pnreplaceval(L("node"),"verb", 1); L("pos") = "_verb"; # Any -ing verb can be an eventive noun. if (strendswith(L("text"),"ing")) pnreplaceval(L("node"),"eventive",1); } else if (L("val") == "adjective") { pnreplaceval(L("node"),"adj", 1); L("pos") = "_adj"; } else if (L("val") == "adverb") { pnreplaceval(L("node"),"adv", 1); L("pos") = "_adv"; } else if (L("val") == "prep") { pnreplaceval(L("node"),"prep", 1); L("pos") = "_prep"; } else if (L("val") == "pronoun") { pnreplaceval(L("node"),"pro", 1); L("pos") = "_pro"; } else if (L("val") == "conj" || L("val") == "conjunction") # 03/20/02 AM. { pnreplaceval(L("node"),"conj", 1); L("pos") = "_conj"; } else if (L("val") == "interj") { pnreplaceval(L("node"),"interj", 1); L("pos") = "_interj"; } else if (L("val") == "det") { pnreplaceval(L("node"),"det", 1); L("pos") = "_det"; } else { # Hmm. Found "abbreviation" as one unhandled # "misc.txt" << "abbreviation=" << L("text") << "\n"; pnreplaceval(L("node"),"abbr",1); # L("pos") = 0; } L("vals") = nextval(L("vals")); ++L("pos num"); } pnreplaceval(L("node"),"pos num", L("pos num")); if (L("pos") && !pnvar(L("node"),"pos")) # 06/05/05 AM. 
pnreplaceval(L("node"),"pos",L("pos")); if (L("pos num") > 0 && !pnvar(L("node"),"stem")) { if (L("stem") = nvstem(L("text"))) pnreplaceval(L("node"),"stem",L("stem")); else pnreplaceval(L("node"),"stem",L("text")); } } ######## # FUNC: VGAGREE # ARGS: Top-level nodes assumed to bear info. # RET: L("ret")[0] - 1 if agreement, else 0. # L("ret")[1] - 1 if passive voice, else 0. # L("ret")[2] - "past", "present", "future", 0. # L("ret")[3] = "inf","-en","-ed","-edn","-ing","-s", or 0. # First element's conjugation. (see notes). # May want progressive, etc. "was eating..." # Would like the FAILURE POINT, so that I know where to flag # no glomming verbs left and no glomming verbs right. # NOTE: For constructs like "Should John be eating", will # consider that "be eating" has good agreement. But # may want to return a flag saying it is "incomplete". # Should John be eating => inf-start. # Has John eaten => en-start. # Was John eating => ing-start. # Was John eaten => en-start. # Some hints: # modal => needs inf. # have => needs en. # be => needs en (passive) or ing. # being => needs en. # OPT: May want to internalize this in NLP++ for speed, at some # point. ######## vgagree(L("m"),L("h"),L("b"),L("being"),L("v")) { #if (L("m")) # "vg.txt" << "m=" << pnvar(L("m"),"$text") << "\n"; #if (L("v")) # "vg.txt" << "v=" << pnvar(L("v"),"$text") << "\n"; L("ret")[0] = 1; # Agreement till proved otherwise. L("ret")[3] = 0; # Set return vals to zero. L("need") = 0; # What's needed next. # Vals = "inf","en","en-ing". L("first") = 0; # First element seen. L("last") = 0; # Last element seen. if (L("m")) # MODAL { # Need to set can-could type of modality here, or "future". L("need") = "inf"; if (!L("first")) L("first") = L("m"); L("last") = L("m"); } if (L("h")) # HAVE { if (L("need") == "inf" && !pnvar(L("h"),"inf") ) { # eg, "WILL HAD". L("ret")[0] = 0; # NO AGREEMENT. 
# NOTE(review): line breaks lost in extraction — '#' comments swallow the
# code that follows them on the same physical line; restore upstream line
# breaks before use.  Text kept byte-identical pending that fix.
# Functions in this span: remainder of vgagree (be/being/verb agreement
# checks and passive-voice detection), vconjq (test a verb node against a
# required conjugation), and vconj (classify a verb node's conjugation).
return L("ret"); } L("need") = "en"; if (!L("first")) L("first") = L("h"); L("last") = L("h"); } if (L("b")) # BE { if (L("need") == "inf") { if (!pnvar(L("b"),"inf")) { # eg, "WILL BEEN". L("ret")[0] = 0; # NO AGREEMENT. return L("ret"); } } else if (L("need") == "en" # BUG FIX. # 07/10/02 AM. && !pnvar(L("b"),"-en") ) { # eg, "HAVE ARE". L("ret")[0] = 0; # NO AGREEMENT. return L("ret"); } L("need") = "en-ing"; if (!L("first")) L("first") = L("b"); L("last") = L("b"); } if (L("being")) # BEING { if (L("need") == "inf") { # eg, "WILL BEING". L("ret")[0] = 0; # NO AGREEMENT. return L("ret"); } else if (L("need") == "en") # BUG FIX. # 07/10/02 AM. { # eg, "HAVE BEING". L("ret")[0] = 0; # NO AGREEMENT. return L("ret"); } # else 0 or "en-ing" are OK. L("need") = "en"; if (!L("first")) L("first") = L("being"); L("last") = L("being"); } if (L("v")) # VERB { L("vsuff") = vconj(L("v")); if (!L("vsuff")) { # eg, "WAS BEEN". L("ret")[0] = 0; # NO AGREEMENT. return L("ret"); } if (L("need") == "inf") { if (!vconjq(L("v"),"inf")) { # eg, "WILL ATE". L("ret")[0] = 0; # NO AGREEMENT. return L("ret"); } } else if (L("need") == "en") # BUG FIX. # 07/10/02 AM. { if (!vconjq(L("v"),"-en")) { # eg, "HAVE ATE". L("ret")[0] = 0; # NO AGREEMENT. return L("ret"); } } else if (L("need") == "en-ing") { if (L("vsuff") == "-edn" || L("vsuff") == "-en" ) { L("ret")[1] = 1; # SETS PASSIVE VOICE FOR VG. } else if (L("vsuff") == "-ing") { } else { # eg, "WAS ATE". L("ret")[0] = 0; # NO AGREEMENT. return L("ret"); } } # else 0 is OK. if (!L("first")) L("first") = L("v"); L("last") = L("v"); } # TODO: Use first and last to determine need to the left # and need to the right of this verb group. eg, for # resolving question syntax to left, unknown words to right. return L("ret"); } ######## # FUNC: VCONJQ # SUBJ: Get needed verb conjugation. # ARGS: Verb node. # RET: 1 if matched need, else 0. 
# ASS: Assumes all irregular verb forms are in (l_)common.pat # (Could place this knowledge in kb also, for easy # reference...) # NOTE: VARIANT OF VCONJ TO HANDLE AMBIGUOUS VERBS LIKE "set". # For need == "-edn", requiring exact match. This will enable # user to tailor calls more flexibly. # If asking for "-edn", get anything that qualifies. ######## vconjq(L("v"), L("need")) { if (!L("v") || !L("need")) { "err.txt" << "vcongj: "; "err.txt" << L("need") << " " << phrasetext(); "err.txt" << " " << G("$passnum") << "," << G("$rulenum") << "\n"; return 0; } if (pnname(L("v")) == "_vg") L("v") = pnvar(L("v"),"verb node"); # 05/20/07 AM. if (!L("v")) { "err.txt" << "vcongj: "; "err.txt" << L("need") << " " << phrasetext(); "err.txt" << " " << G("$passnum") << "," << G("$rulenum") << "\n"; return 0; } # Exact match. if (pnvar(L("v"),L("need"))) return 1; # OPT: May want to collapse all this to one var! if (pnvar(L("v"),"-edn")) # Irregular verb +edn ending. { if (L("need") == "-en" || L("need") == "-ed") return 1; return 0; # Assume no match. } if (L("need") == "-edn") { if (pnvar(L("v"),"-en") || pnvar(L("v"),"-ed")) # Changing to an OR. # 05/28/07 AM. return 1; } # If any nonmatching conjugation present, assume failure. # (Features should be complete, if present.) if (pnvar(L("v"),"-en")) # Irregular +en return 0; if (pnvar(L("v"),"inf")) # Exception like "need". return 0; if (pnvar(L("v"),"-ed")) # Irregular like "ate". return 0; if (pnvar(L("v"),"-ing")) return 0; if (pnvar(L("v"),"-s")) return 0; # Moved this down here. # if (pnvar(L("v"),"stem") == "be") # Special case. { # Todo: Can now handle all these.... return 0; } # NOT IRREGULAR, SO USE LITERAL TEXT. # Need a convention for getting just the verb's text. # eg in something like "haven't". 
L("vtext") = nodetext(L("v")); #"vg.txt" << "vconj text=" << L("vtext") << "\n"; if (!L("vtext")) return 0; if (strendswith(L("vtext"),"ed")) { if (L("need") == "-edn" || L("need") == "-en" || L("need") == "-ed") return 1; # "Normal" verb with ambiguous ed ending. return 0; } if (strendswith(L("vtext"),"ing")) return L("need") == "-ing"; #if (suffix(L("vtext"),"s")) # "s" is a proper suffix. if (nvsuff(L("vtext")) == "-s") # 06/15/06 AM. return L("need") == "-s"; return L("need") == "inf"; # Assume found uninflected form. } ######## # FUNC: VCONJ # SUBJ: Get verb conjugation. # ARGS: Verb node. # RET: "inf","-s","-ing","-edn","-ed","-en", or 0. # ASS: Assumes all irregular verb forms are in (l_)common.pat # (Could place this knowledge in kb also, for easy # reference...) # May not work as desired for verbs like be. # Need to handle highly ambiguous, eg, "set". ######## vconj(L("v")) { if (!L("v")) return 0; # OPT: May want to collapse all this to one var! if (pnvar(L("v"),"-edn")) # Irregular verb +edn ending. return "-edn"; if (pnvar(L("v"),"-en")) # Irregular +en return "-en"; if (pnvar(L("v"),"inf")) # Exception like "need". return "inf"; if (pnvar(L("v"),"-ed")) # Irregular like "ate". return "-ed"; if (pnvar(L("v"),"-ing")) return "-ing"; if (pnvar(L("v"),"-s")) return "-s"; # Moved this down here. # if (pnvar(L("v"),"stem") == "be") # Special case. return 0; # NOT IRREGULAR, SO USE LITERAL TEXT. # Need a convention for getting just the verb's text. # eg in something like "haven't". L("vtext") = nodetext(L("v")); #"vg.txt" << "vconj text=" << L("vtext") << "\n"; if (!L("vtext")) return 0; if (strendswith(L("vtext"),"ed")) return "-edn"; # "Normal" verb with ambiguous ed ending. if (strendswith(L("vtext"),"ing")) return "-ing"; #if (suffix(L("vtext"),"s")) # "s" is a proper suffix. if (nvsuff(L("vtext")) == "-s") # 06/15/06 AM. return "-s"; return "inf"; # Assume found uninflected form. } ############## ## QADDSTR ## SUBJ: Add string to array if unique. 
# NOTE(review): line breaks lost in extraction — '#' comments swallow the
# code that follows them on the same physical line; restore upstream line
# breaks before use.  Text kept byte-identical pending that fix.
# Functions in this span: qaddstr/qaddvalue (append to array if unique),
# addvalue (unconditional append), qaddconval/qaddstrval (append unique
# value to a KB concept attribute list), nameinlist/subnameinlist (find a
# name, or a prefix-matching name, in an array), minimum/maximum/absolute
# (numeric helpers), monthtonum and the start of monthtonumstr (month
# name to number conversions).
## RET: New array. ## NOTE: Assume array of non-empty values. ############## qaddstr(L("txt"),L("arr")) { if (!L("txt")) return L("arr"); if (!L("arr")) return L("txt"); L("ii") = 0; while (L("elt") = L("arr")[L("ii")] ) { L("t") = L("arr")[L("ii")]; if (strequalnocase(L("txt"),L("t"))) return L("arr"); # String aleady there. ++L("ii"); } # Didn't find text, so add to end. L("arr")[L("ii")] = L("txt"); return L("arr"); } ############## ## QADDVALUE ## SUBJ: Add value to array if unique. ## RET: New array. ## NOTE: Assume array of non-empty values. ############## qaddvalue(L("val"),L("arr")) { if (!L("val")) return L("arr"); if (!L("arr")) return L("val"); L("ii") = 0; while (L("v") = L("arr")[L("ii")] ) { if (L("v") == L("val")) return L("arr"); # Value aleady there. ++L("ii"); } # Didn't find text, so add to end. L("arr")[L("ii")] = L("val"); return L("arr"); } ############## ## ADDVALUE ## SUBJ: Add value to array, no checking. ## RET: New array. ## NOTE: Assume array of non-empty values. ############## addvalue(L("val"),L("arr")) { if (!L("val")) return L("arr"); if (!L("arr")) return L("val"); L("len") = arraylength(L("arr")); # Didn't find text, so add to end. L("arr")[L("len")] = L("val"); return L("arr"); } ############## ## QADDCONVAL ## SUBJ: Add to kb concept list if unique. ## RET: 1 if unique and added, else 0 (redundant or error). ## NOTE: ############## qaddconval( L("con"), # Concept holding list of cons. L("key"), # Field name holding list. L("conval") # Value to add to list. ) { if (!L("con") || !L("key") || !L("conval")) return 0; if (!(L("val") = findvals(L("con"),L("key")) )) { # No values, so add new concept to list. addconval(L("con"),L("key"),L("conval")); return 1; } while (L("val")) { L("c") = getconval(L("val")); # If redundant, done. if (L("c") == L("conval")) return 0; L("val") = nextval(L("val")); } # Did not find concept in list, so add it. # NLP++: A way to add concept at end of list. 
addconval(L("con"),L("key"),L("conval")); return 1; } ############## ## QADDSTRVAL ## SUBJ: Add to kb concept list if unique. ## RET: 1 if unique and added, else 0 (redundant or error). ## NOTE: ############## qaddstrval( L("con"), # Concept holding list of cons. L("key"), # Field name holding list. L("strval") # Value to add to list. ) { if (!L("con") || !L("key") || !L("strval")) return 0; if (!(L("val") = findvals(L("con"),L("key")) )) { # No values, so add new concept to list. addstrval(L("con"),L("key"),L("strval")); return 1; } while (L("val")) { L("c") = getstrval(L("val")); # If redundant, done. if (L("c") == L("strval")) return 0; L("val") = nextval(L("val")); } # Did not find concept in list, so add it. # NLP++: A way to add concept at end of list. addstrval(L("con"),L("key"),L("strval")); return 1; } ############## ## NAMEINLIST ## SUBJ: See if name is in given array and count. ## RET: [0] 1 if present, 0 if absent. ## [1] Position in list, if present. ############## nameinlist(L("txt"),L("arr"),L("count")) { if (!L("txt") || !L("count")) return 0; L("ii") = 0; while (L("ii") < L("count")) { L("t") = L("arr")[L("ii")]; if (strequalnocase(L("txt"),L("t"))) { L("res")[0] = 1; L("res")[1] = L("ii"); return L("res"); } ++L("ii"); } return 0; } ############## ## SUBNAMEINLIST ## SUBJ: See if name is in (or starts) given array and count. ## RET: [0] 1 if present, 0 if absent. ## [1] Position in list, if present. 
############## subnameinlist(L("txt"),L("arr"),L("count")) { if (!L("txt") || !L("count")) return 0; L("utxt") = strtoupper(L("txt")); L("ii") = 0; while (L("ii") < L("count")) { L("t") = L("arr")[L("ii")]; if (strequalnocase(L("txt"),L("t"))) { L("res")[0] = 1; L("res")[1] = L("ii"); return L("res"); } else { L("ut") = strtoupper(L("t")); if (G("dev")) "subname.txt" << L("ut") << " vs " << L("utxt") << "\n"; if (strstartswith(L("ut"),L("utxt"))) { L("res")[0] = 1; L("res")[1] = L("ii"); return L("res"); } else if (strstartswith(L("utxt"),L("ut"))) { # Todo: May want to update organization name... # Todo: Need a function for looking up org vs indiv. L("res")[0] = 1; L("res")[1] = L("ii"); return L("res"); } } ++L("ii"); } return 0; } ############## ## MINIMUM ## SUBJ: Get minimum of two numbers. ## RET: Smaller number. ## WARN: "min" is a C++ function name, and so won't compile. ############## minimum(L("a"),L("b")) { if (L("a") < L("b")) return L("a"); return L("b"); } ############## ## MAXIMUM ## SUBJ: Get maximum of two numbers. ## RET: Larger number. ## WARN: "max" is a C++ function name, and so won't compile. ############## maximum(L("a"),L("b")) { if (L("a") > L("b")) return L("a"); return L("b"); } ############## ## ABSOLUTE ## SUBJ: Get absolute value. ############## absolute(L("num")) { if (L("num") >= 0) return L("num"); L("x") = -L("num"); #return -L("num"); # NLP++ error. return L("x"); } ######## # FUNC: MONTHTONUM # SUBJ: Convert month to number. # INPUT: # OUTPUT: ######## monthtonum( L("text") # Lowercased text for word. 
) { if (L("text") == "january" || L("text") == "jan") return 1; else if (L("text") == "february" || L("text") == "feb") return 2; else if (L("text") == "march" || L("text") == "mar") return 3; else if (L("text") == "april" || L("text") == "apr") return 4; else if (L("text") == "may") return 5; else if (L("text") == "june" || L("text") == "jun") return 6; else if (L("text") == "july" || L("text") == "jul") return 7; else if (L("text") == "august" || L("text") == "aug") return 8; else if (L("text") == "september" || L("text") == "sep" || L("text") == "sept") return 9; else if (L("text") == "october" || L("text") == "oct") return 10; else if (L("text") == "november" || L("text") == "nov") return 11; else if (L("text") == "december" || L("text") == "dec") return 12; else return 0; } ############## ## MONTHTONUMSTR ## SUBJ: Convert month to number string. ## RET: mm = month number as two digit string. ############## monthtonumstr(L("str")) { if (!L("str")) return "00"; L("str") = str(L("str")); # Make sure it's a string. L("str") = strtolower(L("str")); if (L("str") == "january" || L("str") == "jan") return "01"; if (L("str") == "february" || L("str") == "feb") return "02"; if (L("str") == "march" || L("str") == "mar") return "03"; if (L("str") == "april" || L("str") == "apr") return "04"; if (L("str") == "may") return "05"; if (L("str") == "june" || L("str") == "jun") return "06"; if (L("str") == "july" || L("str") == "jul") return "07"; if (L("str") == "august" || L("str") == "aug") return "08"; if (L("str") == "september" || L("str") == "sept" || L("str") == "sep") return "09"; if (L("str") == "october" || L("str") == "oct") return "10"; if (L("str") == "november" || L("str") == "nov") return "11"; if (L("str") == "december" || L("str") == "dec") return "12"; return "00"; } ############## ## GETRANGE ## SUBJ: Get real node for a range of rule element numbers. ## RET: [0] first node in range. ## [1] last node in range. ## [2] ord of first node in range. 
# NOTE(review): line breaks lost in extraction — '#' comments swallow the
# code that follows them on the same physical line; restore upstream line
# breaks before use.  Text kept byte-identical pending that fix.
# Function in this span: getrange — returns [first node, last node, first
# ord, last ord] for a rule of the given length, via a hand-unrolled
# ladder over elements N(1)..N(20); returns 0 for lengths beyond 20.
## [3] ord of last node in range. ## NOTE: More practical than getrangenodes. ## KEY: real range, real first, real last. ## NLP++: NLP++ should provide this information. ############## getrange(L("len")) # Length of rule. (Or MAXIMUM LENGTH OF BATCH) { if (!L("len")) return 0; if (N(1)) { L("f") = eltnode(1); L("l") = lasteltnode(1); L("of") = 1; L("ol") = 1; } if (L("len") == 1) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (N(2)) { if (!L("f")) L("f") = eltnode(2); L("l") = lasteltnode(2); if (!L("of")) L("of") = 2; L("ol") = 2; } if (L("len") == 2) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (N(3)) { if (!L("f")) L("f") = eltnode(3); L("l") = lasteltnode(3); if (!L("of")) L("of") = 3; L("ol") = 3; } if (L("len") == 3) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (N(4)) { if (!L("f")) L("f") = eltnode(4); L("l") = lasteltnode(4); if (!L("of")) L("of") = 4; L("ol") = 4; } if (L("len") == 4) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (N(5)) { if (!L("f")) L("f") = eltnode(5); L("l") = lasteltnode(5); if (!L("of")) L("of") = 5; L("ol") = 5; } if (L("len") == 5) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (N(6)) { if (!L("f")) L("f") = eltnode(6); L("l") = lasteltnode(6); if (!L("of")) L("of") = 6; L("ol") = 6; } if (L("len") == 6) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (N(7)) { if (!L("f")) L("f") = eltnode(7); L("l") = lasteltnode(7); if (!L("of")) L("of") = 7; L("ol") = 7; } if (L("len") == 7) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (N(8)) { if (!L("f")) L("f") = eltnode(8); 
L("l") = lasteltnode(8); if (!L("of")) L("of") = 8; L("ol") = 8; } if (L("len") == 8) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (N(9)) { if (!L("f")) L("f") = eltnode(9); L("l") = lasteltnode(9); if (!L("of")) L("of") = 9; L("ol") = 9; } if (L("len") == 9) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (N(10)) { if (!L("f")) L("f") = eltnode(10); L("l") = lasteltnode(10); if (!L("of")) L("of") = 10; L("ol") = 10; } if (L("len") == 10) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (N(11)) { if (!L("f")) L("f") = eltnode(11); L("l") = lasteltnode(11); if (!L("of")) L("of") = 11; L("ol") = 11; } if (L("len") == 11) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (N(12)) { if (!L("f")) L("f") = eltnode(12); L("l") = lasteltnode(12); if (!L("of")) L("of") = 12; L("ol") = 12; } if (L("len") == 12) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (N(13)) { if (!L("f")) L("f") = eltnode(13); L("l") = lasteltnode(13); if (!L("of")) L("of") = 13; L("ol") = 13; } if (L("len") == 13) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (N(14)) { if (!L("f")) L("f") = eltnode(14); L("l") = lasteltnode(14); if (!L("of")) L("of") = 14; L("ol") = 14; } if (L("len") == 14) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (N(15)) { if (!L("f")) L("f") = eltnode(15); L("l") = lasteltnode(15); if (!L("of")) L("of") = 15; L("ol") = 15; } if (L("len") == 15) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (N(16)) { if (!L("f")) L("f") = eltnode(16); L("l") = 
lasteltnode(16); if (!L("of")) L("of") = 16; L("ol") = 16; } if (L("len") == 16) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (N(17)) { if (!L("f")) L("f") = eltnode(17); L("l") = lasteltnode(17); if (!L("of")) L("of") = 17; L("ol") = 17; } if (L("len") == 17) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (N(18)) { if (!L("f")) L("f") = eltnode(18); L("l") = lasteltnode(18); if (!L("of")) L("of") = 18; L("ol") = 18; } if (L("len") == 18) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (N(19)) { if (!L("f")) L("f") = eltnode(19); L("l") = lasteltnode(19); if (!L("of")) L("of") = 19; L("ol") = 19; } if (L("len") == 19) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (N(20)) { if (!L("f")) L("f") = eltnode(20); L("l") = lasteltnode(20); if (!L("of")) L("of") = 20; L("ol") = 20; } if (L("len") == 20) { L("res")[0] = L("f"); L("res")[1] = L("l"); L("res")[2] = L("of"); L("res")[3] = L("ol"); return L("res"); } if (G("dev")) "error.txt" << "Rule length not handled=" << L("len") << "\n"; return 0; } ############## ## GETRANGENODES ## SUBJ: Get real node for a range of rule element numbers. ## RET: [0] first node in range. ## [1] last node in range. ## [2] ord of first node in range. ## [3] ord of last node in range. ## KEY: real range, real first, real last. ############## getrangenodes(L("ofirst"),L("olast")) { if (!L("ofirst") || !L("olast")) return 0; if (L("ofirst") > L("olast")) return 0; # Todo: Flag error. # Fetch the nodes. Can assume optional ends, then look # toward the middle for a real node! Would simplify # code that accounts for optionals. Try it now.... # Account for multi-element nodes also. 
L("first") = eltnode(L("ofirst")); L("last") = lasteltnode(L("olast")); if (!L("first")) { L("ii") = 1 + L("ofirst"); while (L("ii") <= L("olast")) { L("first") = eltnode(L("ii")); if (L("first")) { L("ofirst") = L("ii"); # New start ord. L("ii") = L("olast") + 1; # Terminate loop. } else ++L("ii"); } } if (!L("first")) return 0; if (!L("last")) { L("ii") = L("olast") - 1; while (L("ii") >= L("ofirst")) { L("last") = lasteltnode(L("ii")); if (L("last")) { L("olast") = L("ii"); # New end ord. L("ii") = L("ofirst") - 1; # Terminate loop. } else --L("ii"); } } L("res")[0] = L("first"); L("res")[1] = L("last"); L("res")[2] = L("ofirst"); L("res")[3] = L("olast"); return L("res"); # Return array. } ######## # FUNC: NODETEXT # ARGS: Get text of a node. # RET: text or 0. # ASS: Using variable called "text" if node's text was # created explicitly. # WARN: GETTING LOWERCASED TEXT. ######## nodetext(L("n")) { #if (!L("n")) # return 0; #L("text") = pnvar(L("n"),"text"); # If var present, use it. #if (!L("text")) # return strtolower(pnvar(L("n"),"$text")); # Get from tokens. #return L("text"); return nodetreetext(L("n")); # 10/25/10 AM. } ############## ## NODETEXT ## SUBJ: Get plain or corrected text for node. ## RET: text for node. ## NOTE: Text may be original, cleaned, and/or corrected. ## Confidence may be associated with these. ############## #nodetext(L("n")) #{ #if (!L("n")) # return 0; #L("txt") = pnvar(L("n"),"text"); # Cleaned/corrected text. #if (L("txt")) # return L("txt"); #return pnvar(L("n"),"$text"); # As-is from text buffer. #} ############## ## NODERAWTEXT ## SUBJ: Get AS-IS text for node. ## RET: text for node. ## NOTE: Text may be original, cleaned, and/or corrected. ## Confidence may be associated with these. ## OPT: RETURNS AS-IS ORIGINAL TEXT SNIPPET FROM INPUT BUFFER. # ############## noderawtext(L("n")) { if (!L("n")) return 0; # LIMIT SIZE OF TEXT FETCHED FROM A NODE. # FIX. # L("max") = 255; # *DELIVER*. # FIX. # 08/08/12 AM. 
L("start") = pnvar(L("n"),"$ostart"); # 08/08/12 AM. L("end") = pnvar(L("n"),"$oend"); # 08/08/12 AM. if ((L("end") - L("start")) > L("max")) # 08/08/12 AM. L("end") = L("start") + L("max"); # 08/08/12 AM. if (L("end") < 0 || L("start") < 0) # CHECKING # 11/15/12 AM. return 0; # 11/15/12 AM. if (L("end") < L("start")) return 0; # CHECKING # 11/15/12 AM. return inputrange(L("start"),L("end")); # 08/08/12 AM. } ############## ## NODETREETEXT(NEWERFORREFERENCE) ############## nodetreetextNEWERFORREFERENCE(L("n")) { if (!L("n")) return 0; # Some nodes get a text built for them. # if (L("tt") = pnvar(L("n"),"text")) return L("tt"); # 02/27/13 AM. # LIMIT SIZE OF TEXT FETCHED FROM A NODE. # FIX. # L("max") = 255; # *DELIVER*. # FIX. # 08/08/12 AM. L("start") = pnvar(L("n"),"$ostart"); # 08/08/12 AM. L("end") = pnvar(L("n"),"$oend"); # 08/08/12 AM. if ((L("end") - L("start")) > L("max")) # 08/08/12 AM. L("end") = L("start") + L("max"); # 08/08/12 AM. if (L("end") < 0 || L("start") < 0) # CHECKING # 11/15/12 AM. return 0; # 11/15/12 AM. if (L("end") < L("start")) return 0; # CHECKING # 11/15/12 AM. return inputrange(L("start"),L("end")); # 08/08/12 AM. } ############## ## NODETREETEXT ## SUBJ: Get plain, corrected, or modified text for node. ## RET: text for node. ## NOTE: Text may be original, cleaned, and/or corrected. ## Confidence may be associated with these. ############## nodetreetext(L("n")) { if (!L("n")) return 0; L("txt") = pnvar(L("n"),"text"); # Cleaned/corrected text. if (L("txt")) return L("txt"); #return pnvar(L("n"),"$treetext"); # As-is from parse tree. return prosify(L("n"),"text"); # 09/07/08 AM. } ############## ## NODESTREETEXT ## SUBJ: Get plain or corrected text for a range of nodes. ## RET: text for nodes. ## NOTE: Text may be original, cleaned, and/or corrected. ## "NOSP" - if two nodes are not separated by whitespace, should ## set to 1. Else zero. ## "text" - If this is present on a node, then it is used without ## traversing inside the node. 
############## nodestreetext( L("s"), # Start of range. L("e") # End of range ) { if (!L("s")) return 0; if (L("e")) L("z") = pnnext(L("e")); L("n") = L("s"); L("str") = 0; G("LEN") = 0; # TRACK AND LIMIT COLLECTED LENGTH TO G("MAX"). while (L("n") && L("n") != L("z")) { L("str") = prosifyrec( L("n"), # Root node of subtree. "text", # Name of user-supplied text field. L("str"), # The glommed string so far. 1 # Flag if root or not. ); if (G("LEN") >= G("MAX")) {G("LEN") = 0; return L("str"); } # TRACK AND LIMIT. L("n") = pnnext(L("n")); } return L("str"); } # The old nodestreetext. #if (!L("s")) # return 0; # #if (L("e")) # L("e") = pnnext(L("e")); # #while (L("s") && L("s") != L("e")) # { # L("txt") = L("txt") + nodetreetext(L("s")); # L("s") = pnnext(L("s")); # } #return L("txt"); ############## ## PHRASETREETEXT ## SUBJ: Get plain or corrected text for node's top level phrase. ## RET: text for node. ## NOTE: Follow given "root" node down till a "text" ## variable is seen or until it branches out. Then ## traverse at that branched level. ############## phrasetreetext(L("root")) { return prosify(L("root"),"text"); #if (!L("root")) # return 0; # #L("n") = L("root"); #L("done") = 0; #while (L("n") && !L("done")) # { # L("txt") = pnvar(L("n"),"text"); # Cleaned/corrected text. # if (L("txt")) # return L("txt"); # if (pnnext(L("n"))) # L("done") = 1; # else # L("n") = pndown(L("n")); # Go down a level. # } # #if (!L("n")) # return nodetreetext(L("root")); # #return nodestreetext(pndown(L("n")),0); } ############## ## STRDETAG ## SUBJ: Get rid of HTML tags from a string. ## RET: str = trimmed string. ## NOTE: Assuming no lone < and > signs. ############## strdetag(L("str")) { if (!L("str")) return 0; # Build a new string. L("new") = 0; # Traverse string one char at a time. OPT=inefficient in # NLP++ currently. L("len") = strlength(L("str")); L("ii") = 0; # Current position in string. L("ch") = strpiece(L("str"),0,0); # Lookahead char. 
L("outside") = 1; # If outside of a tag. while (L("ch")) { if (L("ch") == ">") L("outside") = 1; else if (L("ch") == "<") L("outside") = 0; else if (!L("outside")) # Inside a tag. ; # Ignore chars inside tag. else # Outside a tag. L("new") = L("new") + L("ch"); ++L("ii"); # Next char position. if (L("ii") >= L("len")) return L("new"); L("ch") = strpiece(L("str"),L("ii"),L("ii")); } return L("new"); } ############## ## PNPARENT ## SUBJ: Fetch parse tree node's parent. ############## pnparent(L("n")) { if (!L("n")) return 0; # Get first node in list. while (L("x") = pnprev(L("n"))) { L("n") = L("x"); } return pnup(L("n")); } ######## # FUNC: NUMBER # SUBJ: Check singular/plural/any for noun. # ARGS: Noun node. # RET: "singular", "plural", "any", or 0. # ASS: Should return any for mass nouns, things like sheep... # TODO: Irregular plurals. radii, cherubim, etc. ######## number(L("n")) { if (!L("n")) { return 0; } if (pnvar(L("n"),"-s")) return "plural"; L("num") = pnvar(L("n"),"number"); if (L("num")) return L("num"); L("ntext") = nodetext(L("n")); if (!L("ntext")) { return 0; } L("stem") = pnvar(L("n"),"stem"); if (!L("stem")) L("stem") = nvstem(L("ntext")); #"stem.txt" << "stem,text=" << L("stem") << "," << L("ntext") <<"\n"; if (strendswith(L("ntext"),"s") && L("stem") != L("ntext") ) return "plural"; return "singular"; } ######## # FUNC: SINGULAR # SUBJ: True if noun can be singular. # ARGS: Noun node. # RET: 1 or 0. # ASS: Should return true for mass nouns, things like sheep... # TODO: Irregular plurals. radii, cherubim, etc. 
########
singular(L("n"))
{
if (!L("n"))
  return 0;
if (pnvar(L("n"),"-s"))
  return 0;  # Marked with plural suffix.
L("pl") = pnvar(L("n"),"number");
if (L("pl") == "singular")
  return 1;
else if (L("pl") == "plural")
  return 0;
if (pnvar(L("n"),"number") == "singular")
  return 1;
L("ntext") = nodetext(L("n"));
if (!L("ntext"))
  return 0;
L("stem") = pnvar(L("n"),"stem");
if (!L("stem"))
  L("stem") = nvstem(L("ntext"));
# Heuristic: trailing "s" with a differing stem => plural only.
if (strendswith(L("ntext"),"s")
  && L("stem") != L("ntext") )
  return 0;
return 1;
}

########
# FUNC: PLURAL
# SUBJ: True if noun can be plural.
# ARGS: Noun node.
# RET: 1 or 0.
# ASS: Should return true for mass nouns, things like sheep...
# TODO: Irregular plurals. radii, cherubim, etc.
########
plural(L("n"))
{
if (!L("n"))
  return 0;
if (pnvar(L("n"),"-s"))
  return 1;
if (pnvar(L("n"),"number") == "plural")
  return 1;
L("ntext") = nodetext(L("n"));
if (!L("ntext"))
  return 0;
L("stem") = pnvar(L("n"),"stem");
if (!L("stem"))
  L("stem") = nvstem(L("ntext"));
# Heuristic: trailing "s" with a differing stem => plural.
if (strendswith(L("ntext"),"s")
  && L("stem") != L("ntext") )
  return 1;
return 0;
}

########
# FUNC: NUMBERV
# SUBJ: See if verb needs singular/plural noun.
# RET: "singular", "plural", "any", or 0.
########
numberv(L("n"))
{
if (!L("n"))
  return 0;
if (pnname(L("n")) == "_vg")
  {
  # Verb group: inspect its first child for modal/have/be forms.
  L("d") = down(L("n"));
  if (pnname(L("d")) == "_modal")
    return "any";
  L("t") = nodetext(L("d"));
  if (pnname(L("d")) == "_have")
    {
    if (L("t") == "have")
      return "plural";
    else if (L("t") == "has")
      return "singular";
    else
      return "any";
    }
  if (pnname(L("d")) == "_be")
    {
    if (L("t") == "am")
      return "singular";
    if (L("t") == "are")
      return "plural";
    if (L("t") == "is")
      return "singular";
    if (L("t") == "was")
      return "singular";
    if (L("t") == "were")
      return "plural";
    }
  if (pnname(L("d")) == "_being")
    return "any";
  # Fall through to examine the main verb node.
  L("n") = pnvar(L("n"),"verb node");
  if (!L("n"))
    return 0;
  }
if (vconjq(L("n"),"-s"))
  return "singular";
if (vconjq(L("n"),"inf"))
  return "plural";  # Assume 3rd person noun previous.
L("t") = nodetext(L("n"));
"numv.txt" << L("t") << "\n";
if (L("t") == "am")
  return "singular";
if (L("t") == "are")
  return "plural";
if (L("t") == "is")
  return "singular";
if (L("t") == "was")
  return "singular";
if (L("t") == "were")
  return "plural";
if (L("t") == "have")
  return "plural";
else if (L("t") == "has")
  return "singular";
else
  return "any";
return "any";
}

########
# FUNC: NUMBERSAGREE
# SUBJ: True if range of nodes agree in numbering.
# RET: Agreed on number or 0 if fail.
# NOTE: Changing this to be for a noun phrase. "Is this NP complete" (sic)
#   If no singular indicators, then end of noun phrase must be plural.
########
numbersagree(L("start"),L("end"))
{
if (!L("start") || !L("end"))
  {
  "err.txt" << "numbersagree: " << phrasetext() << "\n";
  "err.txt" << " " << G("$passnum") << "," << G("$rulenum") << "\n";
  return 0;
  }
if (L("start") == L("end"))
  {
  L("need") = number(L("start"));
  if (L("need"))
    return L("need");
  return "any";  # Node agrees with itself.
  }
L("last") = L("end");
L("end") = pnnext(L("end"));  # Loop sentinel: one past range.
L("n") = L("start");
L("agree") = 1;     # Agreement so far.
L("need") = "any";  # Constraint to be satisfied.
while (L("n") != L("end"))
  {
  L("nm") = pnname(L("n"));
  L("number") = 0;  # Reset.
  if (L("nm") == "_noun" && L("n") != L("last"))
    ;  # IGNORE nonhead nouns.
  else if (pnvar(L("n"),"number"))
    {
    L("number") = pnvar(L("n"),"number");
    if (pnvar(L("n"),"mass-ok"))
      L("mass-ok") = 1;
    }
  else if (pnvar(L("n"),"num"))
    {
    L("num") = pnvar(L("n"),"num");
    if (L("num") == 1)
      L("number") = "singular";
    else
      L("number") = "plural";
    L("mass-ok") = 0;
    }
#  else if (pnvar(L("n"),"noun"))
#    L("number") = number(L("n"));
  else if (strisdigit(pnvar(L("n"),"$text")))  # 10/19/04 AM.
    {
    L("nm") = num(pnvar(L("n"),"$text"));
    if (L("nm") == 1)
      L("number") = "singular";
    else
      L("number") = "plural";
    L("mass-ok") = 0;
    }
  # Should also check final alpha (ie as noun...)
#
  else if (L("n") == L("last"))
    {
    L("number") = number(L("n"));  # Head noun decides.
    }
  if (L("number"))
    {
    if (L("need") == "any")
      L("need") = L("number");
    else if (L("need") != L("number"))
      {
      # Conflict is tolerated only for mass-noun candidates.
      if (!L("mass-ok") || !massnoun(L("n")))
        return 0;  # Disagreement.
      }
    }
  L("n") = pnnext(L("n"));
  }
if (L("need"))
  return L("need");
return "any";  # Agreement.
}

########
# FUNC: MASSNOUN
# SUBJ: True if node can be a mass noun.
# RET: 1 or 0.
########
massnoun(L("n"))
{
if (!L("n"))
  return 0;
if (pnvar(L("n"),"mass"))
  return 1;
L("t") = nodetext(L("n"));
if (!L("t"))
  return 0;
# Gerund-like forms ("swimming") can act as mass nouns.
if (pnvar(L("n"),"noun") || pnvar(L("n"),"unknown"))
  {
  if (strendswith(L("t"),"ing"))
    return 1;
  }
return 0;
}

########
# FUNC: NONLITERAL
# SUBJ: Check if node is nonliteral (should be NLP++ fn...).
########
nonliteral(L("n"))
{
if (!L("n"))
  return 0;
L("nm") = pnname(L("n"));
if (strpiece(L("nm"),0,0) == "_")
  return 1;
return 0;
}

########
# FUNC: LITERAL
# SUBJ: See if node is literal.
# NOTE: Check if node is literal (should be NLP++ fn...).
########
literal(L("n"))
{
if (!L("n"))
  return 0;
L("name") = pnname(L("n"));
if (strpiece(L("name"),0,0) != "_")
  return 1;
if (strlength(L("name")) == 1)  # Name is the underscore char.
  return 1;
return 0;
}

######
# FN: PROSIFYSIMPLE
# SUBJ: Convert subtree to a prose-like string.
# NOTE: Assuming all blanks are gone.
#   Simple one-level traverse of children.
#####
prosifysimple(L("root"))
{
if (!L("root"))
  return 0;
L("node") = pndown(L("root"));
if (!L("node"))
  return pnvar(L("root"),"$text");
# Traverse nodes, sticking in spaces.
L("noafter") = 0;
L("str") = pnvar(L("node"),"$text");
while (L("node") = pnnext(L("node")) )
  {
  # Need ispunct.
  L("txt") = pnvar(L("node"),"$text");
  if (L("txt") != "."
    && L("txt") != ","
    && L("txt") != "-"
    && L("txt") != "'"
    && !L("noafter") )
    L("str") = L("str") + " ";  # Add space BEFORE current word.
  if (L("txt") == "-" || L("txt") == "'")
    L("noafter") = 1;  # No space after, also.
  else
    L("noafter") = 0;
  L("str") = L("str") + pnvar(L("node"),"$text");
  }
return L("str");
}

######
# FN: PROSIFY
# SUBJ: Convert subtree to a prose-like string.
# NOTE: User-supplied text field for nodes with a rebuilt
#   text. Assume space after node unless next is sentence
#   punctuation. If space in tree, use that.
# TODO: What to do with existing whitespace.
#   Options to generate newlines.
#   NLP++ function like PNPROSE to hard-wire this.
# RET: catenated string.
# RECURSIVE.
#####
prosify(
  L("root"),  # Node whose text we're fetching.
  L("field")  # Name of user-supplied text field on nodes.
  )
{
if (!L("root"))
  return 0;
G("LEN") = 0;  # TRACK AND LIMIT COLLECTED LENGTH TO G("MAX").
return prosifyrec(L("root"),L("field"),0,1);
}

######
# FN: PROSIFYREC
# SUBJ: Convert subtree to a prose-like string.
# NOTE: Recursive part of prosify.  (Old version, superseded below.)
# RET: catenated string.
#####
#prosifyrec(
#  L("root"),
#  L("field"),  # Name of user-supplied text field.
#  L("str"),    # The glommed string so far.
#  L("flag")    # Flag if root or not.
#  )
#{
#if (!L("root"))
#  return L("str");
#
#if (L("field"))
#  {
#  L("txt") = pnvar(L("root"),L("field"));
#
#  if (L("str") && L("txt"))
#    {
#    if (pnvar(L("root"),"NOSP"))
#      L("str") = L("str") + L("txt");
#    else
#      L("str") = L("str") + " " + L("txt");
#    }
#  else if (L("txt"))
#    L("str") = L("txt");
#
#  if (L("txt") && L("flag"))
#    return L("str");  # Done!
#  }
#
#L("nm") = pnname(L("root"));
# NLP++: Need strispunct.
# If literal vs nonliteral.
#L("ch") = strpiece(L("nm"),0,0);
#if (L("txt"))  # Done with current node.
#  ;
#else if (L("ch") != "_" || L("nm") == "_")  # Literal. Alpha, num, punct, etc.
#  {
#  # Above, lone underscore is a punctuation, not a nonliteral.
#  if (!L("str"))
#    L("str") = L("nm");
#  else if (pnvar(L("root"),"NOSP"))
#    L("str") = L("str") + L("nm");
#  else if (L("ch") == " " || L("ch") == "\t" || L("ch") == "\n")
#    # If text is whitespace.
#
#    ;  # IGNORE WHITESPACE
#
#  else
#    L("str") = L("str") + " " + L("nm");
#  }
#else  # Nonliteral.
#  # Handle my subtree.
#  L("str") = prosifyrec(pndown(L("root")),L("field"),L("str"),0);
#
#if (pnprev(L("root")))
#  return L("str");
#
# First in a list, handle the list.
#if (L("flag"))
#  return L("str");
#while (L("root") = pnnext(L("root")))
#  L("str") = prosifyrec(L("root"),L("field"),L("str"),1);
#return L("str");
#}

# PROSIFYREC (current version).
# Recursive accumulator for prosify/nodestreetext.  Gloms node text into
# L("str"), inserting spaces unless NOSP is set, while G("LEN") tracks
# collected length against the G("MAX") cap (truncating oversized tokens).
prosifyrec(
  L("root"),
  L("field"),  # Name of user-supplied text field.
  L("str"),    # The glommed string so far.
  L("flag")    # Flag if root or not.
  )
{
if (!L("root"))
  return L("str");

if (L("field"))
  {
  L("txt") = pnvar(L("root"),L("field"));
  if (L("str") && L("txt"))
    {
    if (!pnvar(L("root"),"NOSP"))
      {
      ++G("LEN");
      L("str") = L("str") + " ";
      if (G("LEN") >= G("MAX"))
        return L("str");  # TRACK AND LIMIT.
      }
    # NOSP is true or we've already added its space char.
    L("len") = strlength(L("txt"));  # TRACK AND LIMIT.
    L("tot") = G("LEN") + L("len");  # TRACK AND LIMIT.
#    if (pnvar(L("root"),"NOSP"))
#      {
#      L("str") = L("str") + L("txt");  # When life was a little simpler.
    if (L("tot") < G("MAX"))  # TRACK AND LIMIT.
      {
      L("str") = L("str") + L("txt");
      G("LEN") = L("tot");  # TRACK AND LIMIT.
      }
    else if (L("tot") > G("MAX"))  # TRACK AND LIMIT.
      {
      # Truncate the text to exactly fill the remaining budget.
      L("del") = G("MAX") - G("LEN");
      L("xx") = strpiece(L("txt"),0,L("del")-1);
      L("str") = L("str") + L("xx");
      G("LEN") = G("MAX");  # TRACK AND LIMIT.
      return L("str");
      }
    else  # Exactly equal max length.
      {
      L("str") = L("str") + L("txt");
      G("LEN") = G("MAX");  # TRACK AND LIMIT.
      return L("str");
      }
#      }
    }  # END if (L("str") && L("txt"))
  else if (L("txt"))
    L("str") = L("txt");
  if (L("txt") && L("flag"))
    return L("str");  # Done!
  }  # END if L("field")

# TRACK AND LIMIT. Presumably there are limits on node name length & token length.
L("nm") = pnname(L("root"));
# NLP++: Need strispunct.
# If literal vs nonliteral.
L("ch") = strpiece(L("nm"),0,0);
if (L("txt"))  # Done with current node.
  ;
else if (L("ch") != "_" || L("nm") == "_")  # Literal. Alpha, num, punct, etc.
  {
  # Above, lone underscore is a punctuation, not a nonliteral.
  if (L("str") && !pnvar(L("root"),"NOSP"))
    {
    ++G("LEN");
    L("str") = L("str") + " ";
    if (G("LEN") >= G("MAX"))
      return L("str");  # TRACK AND LIMIT.
    }
  # NOSP is true or we've already added its space char. Or we had no str.
  L("len") = strlength(L("nm"));   # TRACK AND LIMIT.
  L("tot") = G("LEN") + L("len");  # TRACK AND LIMIT.
  # Flag if whitespace literal.
  if (L("ch") == " " || L("ch") == "\t" || L("ch") == "\n")
    ++L("WHITE");
  if (!L("str"))
    {
    if (L("tot") < G("MAX"))
      {
      L("str") = L("nm");
      G("LEN") = L("tot");
      }
    else if (L("tot") > G("MAX"))  # HUGE TOKEN!
      {
      L("del") = G("MAX") - G("LEN");
      L("xx") = strpiece(L("nm"),0,L("del")-1);
      L("str") = L("xx");
      G("LEN") = G("MAX");  # TRACK AND LIMIT.
      return L("str");
      }
    else  # Exactly equal G("MAX").
      {
      L("str") = L("nm");
      G("LEN") = G("MAX");  # TRACK AND LIMIT.
      return L("str");
      }
    }
  else if (pnvar(L("root"),"NOSP") || !L("WHITE"))
    {
#    L("str") = L("str") + L("nm");
    if (L("tot") < G("MAX"))  # TRACK AND LIMIT.
      {
      L("str") = L("str") + L("nm");
      G("LEN") = L("tot");  # TRACK AND LIMIT.
      }
    else if (L("tot") > G("MAX"))  # TRACK AND LIMIT.
      {
      L("del") = G("MAX") - G("LEN");
      if (L("del") > 0)  # FIX.  # 10/12/11 AM.
        {
        L("xx") = strpiece(L("nm"),0,L("del")-1);
        L("str") = L("str") + L("xx");
        }
      G("LEN") = G("MAX");  # TRACK AND LIMIT.
      return L("str");
      }
    else  # Exactly equal max length.
      {
      L("str") = L("str") + L("nm");
      G("LEN") = G("MAX");  # TRACK AND LIMIT.
      return L("str");
      }
    }  # END of NOSP or !WHITE
#  else if (L("ch") == " " || L("ch") == "\t" || L("ch") == "\n")
#    # If text is whitespace.
#
#    ;  # IGNORE WHITESPACE
#
#  else
#    {
#    L("str") = L("str") + " " + L("nm");
#    }
  }  # END if literal
else  # Nonliteral.
  # Handle my subtree.
  L("str") = prosifyrec(pndown(L("root")),L("field"),L("str"),0);

if (pnprev(L("root")))
  return L("str");
# First in a list, handle the list.
if (L("flag"))
  return L("str");
while (L("root") = pnnext(L("root")))
  L("str") = prosifyrec(L("root"),L("field"),L("str"),1);
return L("str");
}

######
# FN: SAMEVCONJ
# SUBJ: See if compatible verb conjugations.
# RET: Most constrained fit.
# TODO: Handle ambiguous verb conjugations.
#####
samevconj(L("v1"),L("v2"))
{
if (!L("v1") || !L("v2"))
  return 0;
#"samevc.txt" << pnvar(L("v1"),"$text")
#  << "," << pnvar(L("v2"),"$text") << "\n";
L("vc1") = vconj(L("v1"));
L("vc2") = vconj(L("v2"));
#"samevc.txt" << L("vc1") << L("vc2") << "\n";
if (L("vc1") == L("vc2"))
  return L("vc1");
# For ambiguous cases...
# Todo: Doesn't necessarily work if BOTH are ambig.
if (vconjq(L("v1"),L("vc2")))
  return L("vc2");
if (vconjq(L("v2"),L("vc1")))
  return L("vc1");
# Now check out -edn -ed -en.
if (L("vc1") != "-edn" && L("vc2") != "-edn")
  return 0;
if (L("vc1") == "-en" || L("vc2") == "-en")
  return "-en";
if (L("vc1") == "-ed" || L("vc2") == "-ed")
  return "-ed";
return 0;  # Error.
}

######
# FN: ISNEWSENTENCE
# SUBJ: See if node starts a new sentence.
# RET: 1 if true, else 0.
#####
isnewsentence(L("node"))
{
if (!L("node"))
  return 0;
L("prev") = pnprev(L("node"));
if (!L("prev"))
  return 1;  # First node in list => sentence start.
L("name") = pnname(L("prev"));
if (L("name") == "_qEOS"
  || L("name") == "_dbldash"
  || L("name") == "\"")
  return 1;  # Tentatively...
return 0;
}

######
# FN: ELLIPTEDPASSIVE
# SUBJ: See if node is passive subclause.
# RET: 1 if true, else 0.
# EG: John, bitten by lice, is mad...
######
elliptedpassive(L("clause"))
{
if (!L("clause"))
  return 0;
L("vg") = pnvar(L("clause"),"vg node");
if (!L("vg"))
  return 0;
if (pnvar(L("clause"),"voice") != "passive")
  return 0;
L("v") = pnvar(L("vg"),"first verb");
if (!L("v"))
  return 0;
if (pnvar(L("v"),"mypos") != "VBN")
  return 0;
return 1;
}

######
# FN: ELLIPTEDACTIVE
# SUBJ: See if node is ving.
# RET: 1 if true, else 0.
# EG: John, biting nails, is mad...
######
elliptedactive(L("clause"))
{
if (!L("clause"))
  return 0;
L("vg") = pnvar(L("clause"),"vg node");
if (!L("vg"))
  return 0;
if (pnvar(L("clause"),"voice") != "active")
  return 0;
L("v") = pnvar(L("vg"),"first verb");
if (!L("v"))
  return 0;
if (pnvar(L("v"),"mypos") == "VBG")
  return 1;
if (vconjq(L("v"),"-ing"))
  return 1;
return 0;
}

######
# FN: PNNAMEINRANGE
# SUBJ: See if list of nodes has name.
# RET: first node found.
######
pnnameinrange(L("name"),L("start"),L("end"))
{
if (!L("name") || !L("start"))
  return 0;
if (L("end"))
  L("end") = pnnext(L("end"));  # One past range; loop sentinel.
L("node") = L("start");
while (L("node"))
  {
  # FIX: compare against the requested name.  Previously this tested
  # the hard-coded literal "_advl", ignoring the L("name") argument.
  if (pnname(L("node")) == L("name"))
    return L("node");
  if (L("node") == L("end"))
    return 0;
  L("node") = pnnext(L("node"));
  }
}

######
# FN: ATTRINRANGE
# SUBJ: See if list of nodes has attr.
# RET: First value found.
######
attrinrange(L("name"),L("start"),L("end"))
{
if (!L("name") || !L("start"))
  return 0;
if (L("end"))
  L("end") = pnnext(L("end"));  # One past range; loop sentinel.
L("node") = L("start");
while (L("node"))
  {
  L("val") = pnvar(L("node"),L("name"));
  if (L("val"))
    return L("val");
  if (L("node") == L("end"))
    return 0;
  L("node") = pnnext(L("node"));
  }
}

######
# FN: PNREPLACEVALRANGE
# SUBJ: Replace value in range of nodes.
######
pnreplacevalrange(L("var"),L("val"),L("start"),L("end"))
{
if (!L("var") || !L("start"))
  return;
if (L("end"))
  L("end") = pnnext(L("end"));  # One past range; loop sentinel.
L("node") = L("start");
while (L("node"))
  {
  if (L("node") == L("end"))
    return;
  pnreplaceval(L("node"),L("var"),L("val"));
  L("node") = pnnext(L("node"));
  }
}

######
# FN: VVAGREE
# SUBJ: See if tenses of 2 verbs agree.
# NOTE:
######
vvagree(L("n1"),L("n2"))
{
if (!L("n1") || !L("n2"))
  fail();
L("nm1") = pnname(L("n1"));
L("nm2") = pnname(L("n2"));
if (L("nm1") == "_vg" && L("nm2") == "_vg")
  # FIX: pass the actual verb-group nodes.  Previously this passed
  # L("vg1")/L("vg2"), which are never assigned in this function,
  # so vgvgagree always received zeros and returned 0.
  return vgvgagree(L("n1"),L("n2"));
if (L("nm1") == "_vg")
  L("v1") = pnvar(L("n1"),"first verb");
else
  L("v1") = L("n1");
if (L("nm2") == "_vg")
  L("v2") = pnvar(L("n2"),"first verb");
else
  L("v2") = L("n2");
return verbverbagree(L("v1"),L("v2"));
}

######
# FN: VGVGAGREE
# SUBJ: See if tenses of 2 vgs agree.
######
vgvgagree(L("vg1"),L("vg2"))
{
if (!L("vg1") || !L("vg2"))
  return 0;
L("first1") = pnvar(L("vg1"),"first verb");
L("first2") = pnvar(L("vg2"),"first verb");
return verbverbagree(L("first1"),L("first2"));
}

######
# FN: VERBVERBAGREE
# SUBJ: See if tenses of 2 verbs agree.
######
verbverbagree(L("first1"),L("first2"))
{
if (!L("first1") || !L("first2"))
  return 0;
L("vc1") = vconj(L("first1"));
L("vc2") = vconj(L("first2"));
#"vgvg.txt" << pnvar(L("first1"),"$text") << "," << pnvar(L("first2"),"$text") << "\n";
#"vgvg.txt" << L("vc1") << "," << L("vc2") << "\n";
if (L("vc1") == L("vc2"))
  return 1;
if (pnvar(L("first1"),"inf") && pnvar(L("first2"),"inf"))
  return 1;
if (pnvar(L("first1"),"-s") && pnvar(L("first2"),"-s"))
  return 1;
if (pnvar(L("first1"),"-ing") && pnvar(L("first2"),"-ing"))
  return 1;
if (pnvar(L("first1"),"-ed") && pnvar(L("first2"),"-ed"))
  return 1;
if (pnvar(L("first1"),"-en") && pnvar(L("first2"),"-en"))
  return 1;
# Check compatibles.
# Check ambiguous.  ("-edn" means -ed/-en ambiguous.)
if (pnvar(L("first1"),"-edn"))
  {
  if (pnvar(L("first2"),"-edn")
    || pnvar(L("first2"),"-en")
    || pnvar(L("first2"),"-ed") )
    return 1;
  }
if (pnvar(L("first2"),"-edn"))
  {
  if (pnvar(L("first1"),"-en")
    || pnvar(L("first1"),"-ed") )
    return 1;
  }
return 0;
}

########
# FUNC: PNADDSTR
# SUBJ: Add string to a node's variable.
# EX: pnaddstr(N(2),"hello","newstr");
# NOTE: For adding multiple values to a variable.
########
pnaddstr(
  L("node"),  # Node we are adding info to.
  L("field"),
  L("str")
  )
{
if (!L("node") || !L("field") || !L("str"))
  return;
L("vals") = pnvar(L("node"),L("field"));
if (!L("vals"))
  L("len") = 0;
else
  L("len") = arraylength(L("vals"));
# Can't directly append a new value onto node.
# Need something like pnaddval(L("node"),L("field"),L("str")).
L("vals")[L("len")] = L("str");
pnreplaceval(L("node"),L("field"),L("vals"));
}

########
# FUNC: GETDATE
# SUBJ: Compute a date string.
# RET: "YYYY-MM-DD" (or "YYYY-00-00" with no month), else 0.
# NOTE(review): years 2004/2005 are hard-coded defaults for a dataset
#   spanning Nov 2004 - 2005; generalize before reuse elsewhere.
########
getdate(L("n"))
{
if (!L("n"))
  return 0;
L("yr") = pnvar(L("n"),"yr");
L("dy") = pnvar(L("n"),"dy");
L("mo") = pnvar(L("n"),"mo");
if (!L("yr") && !L("mo"))
  return 0;
if (!L("yr"))
  {
  if (L("mo") == 11 || L("mo") == 12)
    L("yr") = 2004;
  else
    L("yr") = 2005;
  }
if (!L("mo"))
  return str(L("yr")) + "-00-00";
if (L("mo") < 10)
  L("mo") = "0" + str(L("mo"));
if (L("dy") < 10)
  L("dy") = "0" + str(L("dy"));
return str(L("yr")) + "-" + str(L("mo")) + "-" + str(L("dy"));
}

########
# FUNC: NVSTEM
# SUBJ: Compute stem for word.
# NOTE: NLP++ stem() sucks, should do more of the below.
########
nvstem(L("txt"))
{
if (!L("txt"))
  return 0;
L("lc") = strtolower(L("txt"));
# Can do more accurate if it's a known word.
if (spellword(L("lc")))
  return nvstemknown(L("lc"));
else
  return nvstemunk(L("lc"));
}

########
# FUNC: NVSUFF
# SUBJ: Compute suffix for noun or verb.
# NOTE: NLP++ stem() sucks, should do more of the below.
# NOTE(review): falls off the end with no return when the word was
#   stemmed but matches none of the s/d/g/n endings — returns 0
#   implicitly; confirm callers expect that.
########
nvsuff(L("txt"))
{
if (!L("txt"))
  return L("txt");
L("lc") = strtolower(L("txt"));
L("stem") = nvstem(L("txt"));
if (L("stem") == L("lc"))
  return "inf";
# Got some kind of stemming.
if (strendswith(L("lc"),"s"))
  return "-s";
if (strendswith(L("lc"),"d"))
  return "-edn";
if (strendswith(L("lc"),"g"))
  return "-ing";
if (strendswith(L("lc"),"n"))
  return "-en";
}

########
# FUNC: NVSTEMKNOWN
# SUBJ: Compute stem for known word.
# NOTE: NOT HANDLING IRREGULAR VERBS.
########
nvstemknown(L("lc"))
{
if (!L("lc"))
  return 0;
# EXCEPTIONS HERE.
# (They should be in the kb, etc.)
if (L("lc") == "ironed"
  || L("lc") == "ironing")
  return "iron";

# Look for EXTENSIONS in dictionary.
# Try adding an "-ing".
#L("x") = L("lc") + "ing";
#if (spellword(L("x")))
#  return L("lc");
#
# Try adding an "-s".
#L("x") = L("lc") + "s";
#if (spellword(L("x")))
#  return L("lc");

# Look for suffixes.
# Check doubled letters...
# -ss
if (strendswith(L("lc"),"ss"))
  return L("lc");
L("len") = strlength(L("lc"));
if (L("len") <= 3)
  return L("lc");  # As is.
if (L("len") > 4)
  {
  # -ies => -y
  # -ied => -y
  if (strendswith(L("lc"),"ies")
    || strendswith(L("lc"),"ied"))
    {
    # Try lopping off the s.
    L("x") = strpiece(L("lc"),0,L("len")-2);
    if (spellword(L("x")))
      return L("x");
    # Lop off ies, add y.
    L("x") = strpiece(L("lc"),0,L("len")-4) + "y";
    if (spellword(L("x")))
      return L("x");
    # Lop off es. (-ies => -i)
    L("x") = strpiece(L("lc"),0,L("len")-3);
    if (spellword(L("x")))
      return L("x");
    # Nothing worked. Return as is.
    return L("lc");
    }
  # -ing =>
  # -ing => -e
  # doubled
  if (strendswith(L("lc"),"ing"))
    {
    L("x") = strpiece(L("lc"),0,L("len")-4);
    # Check doubling.
    if (strpiece(L("lc"),L("len")-4,L("len")-4)
      == strpiece(L("lc"),L("len")-5,L("len")-5) )
      {
      # Check as is.
      if (spellword(L("x")))
        return L("x");
      # Remove doubling.
      L("y") = strpiece(L("x"),0,L("len")-5);
      if (spellword(L("y")))
        return L("y");
      return L("lc");
      }
    # Check -ing => -e
    L("y") = L("x") + "e";
    if (spellword(L("y")))
      return L("y");
    # -ing => 0
    if (spellword(L("x")))
      return L("x");
    # Nothing worked, return as is.
    return L("lc");
    }
  # -es => -e
  # -es => 0
  if (strendswith(L("lc"),"es"))
    {
    L("x") = strpiece(L("lc"),0,L("len")-3);
    L("y") = L("x") + "e";
    if (spellword(L("y")))
      return L("y");
    if (spellword(L("x")))
      return L("x");
    return L("lc");
    }
  # -ed => -e
  # doubled
  # -ed =>
  if (strendswith(L("lc"),"ed"))
    {
    L("x") = strpiece(L("lc"),0,L("len")-3);
    # Check doubling.
    if (strpiece(L("x"),L("len")-3,L("len")-3)
      == strpiece(L("x"),L("len")-4,L("len")-4))
      {
      L("y") = strpiece(L("x"),0,L("len")-4);
      if (spellword(L("y")))
        return L("y");
      }
    # Add an e.
    L("y") = L("x") + "e";
    if (spellword(L("y")))
      return L("y");
    if (spellword(L("x")))
      return L("x");
    return L("lc");
    }
  }
# -s =>
if (strendswith(L("lc"),"s"))
  {
  L("x") = strpiece(L("lc"),0,L("len")-2);
  if (spellword(L("x")))
    return L("x");
  }
return L("lc");
}

########
# FUNC: NVSTEMUNK
# SUBJ: Compute possible stem for unknown word.
# NOTE(review): unimplemented — body is only the suffix plan below and
#   returns nothing (0).  Callers via nvstem get 0 for unknown words.
########
nvstemunk(L("lc"))
{
if (!L("lc"))
  return 0;
# Look for suffixes.
# -ss
# -ies => -y
# -es => -e
# -s =>
# -ing =>
# -ing => -e
# -ied => -y
# -ed => -e
# -ed =>
}

########
# FUNC: NODEFEAT
# SUBJ: If node's stem has feature.
########
nodefeat(L("n"),L("feat"))
{
if (!L("n") || !L("feat"))
  return 0;
L("lctxt") = nodestem(L("n"));
if (!L("lctxt"))
  {
  "blupp.txt" << pnvar(L("n"),"$text") << "\n";
  return 0;
  }
L("lctxt") = strtolower(L("lctxt"));
return finddictattr(L("lctxt"),L("feat"));
}

########
# FUNC: VERBFEAT
# SUBJ: If verb's stem has feature.
########
verbfeat(L("n"),L("feat"))
{
if (!L("n") || !L("feat"))
  return 0;
if (pnvar(L("n"),"compound-vg"))
  L("n") = pnvar(L("n"),"last vg");
if (!L("n"))
  {
  # NOTE(review): logs the error but falls through with a zero node,
  # relying on downstream null-tolerance; preserved as-is.
  "err.txt" << "verbfeat: " << phrasetext() << "\n";
  "err.txt" << " " << G("$passnum") << "," << G("$rulenum") << "\n";
  }
L("nm") = pnname(L("n"));
if (L("nm") == "_vg")
  L("v") = pnvar(L("n"),"verb node");
else
  L("v") = L("n");
if (!L("v"))
  return 0;
L("lctxt") = nodestem(L("v"));
if (!L("lctxt"))
  {
  "blup.txt" << pnvar(L("n"),"$text") << "\n";
  return 0;
  }
L("lctxt") = strtolower(L("lctxt"));
return finddictattr(L("lctxt"),L("feat"));
}

########
# FUNC: PHRPREPVERBQ
# SUBJ: If possible phrasal/prepositional verb + particle.
########
phrprepverbq(L("nverb"),L("nprep"))
{
if (!L("nverb") || !L("nprep"))
  return 0;
L("tverb") = nodestem(L("nverb"));
L("tprep") = nodestem(L("nprep"));
# Nonzero dictionary attr => verb+particle combination is known.
return finddictattr(L("tverb"),L("tprep"));
}

########
# FUNC: PHRASALVERBQ
# SUBJ: If possible phrasal verb + particle.
########
phrasalverbq(L("nverb"),L("nprep"))
{
if (!L("nverb") || !L("nprep"))
  return 0;
L("tverb") = nodestem(L("nverb"));
L("tprep") = nodestem(L("nprep"));
L("num") = finddictattr(L("tverb"),L("tprep"));
if (L("num") == 1   # Phrasal
  || L("num") == 3) # Both phrasal and prepositional.
  return 1;
return 0;
}

########
# FUNC: PREPOSITIONALVERBQ
# SUBJ: If possible prepositional verb + prep.
########
prepositionalverbq(L("nverb"),L("nprep"))
{
if (!L("nverb") || !L("nprep"))
  return 0;
L("tverb") = nodestem(L("nverb"));
L("tprep") = nodestem(L("nprep"));
L("num") = finddictattr(L("tverb"),L("tprep"));
if (L("num") == 2   # Prepositional.
  || L("num") == 3) # Both phrasal and prepositional.
  return 1;
return 0;
}

########
# FUNC: LJUST
# SUBJ: Left-justify string in field.
########
ljust(L("str"),L("num"),L("out"))
{
if (!L("out"))
  return;
if (!L("str"))
  {
  indent(L("num"),L("out"));  # Empty field: pad only.
  return;
  }
L("len") = strlength(L("str"));
L("out") << L("str");
L("diff") = L("num") - L("len");
indent(L("diff"),L("out"));
}

##############
## RJ
## SUBJ: Right justify a string etc.
## RET: padded string of width L("field"); the item itself if too wide.
##############
RJ(L("item"),L("field"))
{
if (!L("field"))
  return 0;
if (!L("item"))
  L("num") = L("field");  # No item: return all padding.
else
  {
  L("str") = str(L("item"));
  L("len") = strlength(L("str"));
  if (L("len") >= L("field"))
    return L("str");  # Too wide to pad.
  L("num") = L("field") - L("len");
  }
L("head") = " ";
--L("num");
while (L("num") > 0)
  {
  L("head") = L("head") + " ";
  --L("num");
  }
if (L("str"))
  return L("head") + L("str");
else
  return L("head");
}

########
# FUNC: INDENT
# SUBJ: Print padding.
########
indent(L("num"),L("out"))
{
if (!L("num") || !L("out"))
  return;
while (L("num") > 0)
  {
  L("out") << " ";
  --L("num");
  }
}

########
# FUNC: NODECOUNT
# SUBJ: Count nodes in a list.
# NOTE: NLP++ todo: could be an internal like N("$count",1)
########
nodecount(L("start"),L("end"))
{
if (!L("start"))
  return 0;
L("ii") = 0;
if (L("end"))
  L("end") = pnnext(L("end"));  # One past range; loop sentinel.
while (L("start") && L("start") != L("end"))
  {
  ++L("ii");
  L("start") = pnnext(L("start"));
  }
return L("ii");
}

########
# FUNC: NODESTEXTOUT
# SUBJ: Get text for nodes.
########
nodestextout(
  L("start"),
  L("end"),
  L("out")
  )
{
if (!L("start") || !L("out"))
  return;
if (L("end"))
  L("end") = pnnext(L("end"));  # One past range; loop sentinel.
while (L("start") && L("start") != L("end"))
  {
  L("out") << " " << nodetext(L("start"));
  L("start") = pnnext(L("start"));
  }
}

########
# FUNC: CLAUSELASTNP
# SUBJ: See if clause ends in np.
########
clauselastnp(L("n"))
{
if (!L("n"))
  return 0;
L("nm") = pnname(L("n"));
if (L("nm") == "_np")
  return 1;
if (L("nm") == "_advl")
  {
  if (pnvar(L("n"),"pp"))  # Prepositional phrase, np at end.
    return 1;
  return 0;
  }
L("last") = pnvar(L("n"),"last");
if (L("last"))
  return clauselastnp(L("last"));  # Recursive call on component.
if (L("nm") != "_clause")
  return 0;
L("p") = pnvar(L("n"),"pattern");
if (!L("p"))
  return 0;
# Clause patterns that end with a noun.
if (L("p") == "nvn"
  || L("p") == "vn"
  || L("p") == "n" )
  return 1;
return 0;
}

########
# FUNC: CLAUSEINCOMPLETE
# SUBJ: See if clause is transitive missing object.
# NOTE: Eg, "John threw" which doesn't occur by itself, but
#   can follow in something like "There's the ball that John threw."
########
clauseincomplete(L("n"))
{
if (!L("n"))
  return 0;
# Todo: Need knowledge of transitive and intransitive verbs.
# Todo: Issues with prep/phrasal verb patterns also.
L("p") = pnvar(L("n"),"pattern");
if (!L("p"))
  return 0;
if (L("p") == "nv"
  || L("p") == "v"
  || L("p") == "vn"  # 05/22/07 AM.
  )
  return 1;
return 0;
}

########
# FUNC: CLAUSECOMPLETE
# SUBJ: See if clause can be standalone.
# NOTE: Eg, (incomplete) "John threw" which doesn't occur by itself, but
#   can follow in something like "There's the ball that John threw."
########
clausecomplete(L("n"))
{
if (!L("n"))
  return 0;
# Todo: Need knowledge of transitive and intransitive verbs.
# Todo: Issues with prep/phrasal verb patterns also.
L("p") = pnvar(L("n"),"pattern");
if (!L("p"))
  return 0;
if (L("p") == "nvn"    # || L("p") == "vn"
 || L("p") == "nvnn"   # || L("p") == "vnn"
 || L("p") == "nvj"    # || L("p") == "vj"
 )
  return 1;
if (L("p") != "nv"
 && L("p") != "v")
  return 0;
# See if can be intransitive.
L("vg") = pnvar(L("n"),"vg node");
if (!L("vg"))
  return 0;
L("v") = pnvar(L("vg"),"verb node");
if (!L("v"))
  return 0;
L("stem") = nodestem(L("v"));
"cl.txt" << "clcomplete=" << L("stem") << "\n";
if (!L("stem"))
  return 0;
if (finddictattr(L("stem"),"intr"))
  return 1;
# Todo: should be copula + necessary adverbial etc.
if (copulaq(L("v")))
  return 1;
return 0;
}

########
# FUNC: MHBVADV
# SUBJ: Look at adverbials of mhbv phrase.
# RET:  1 if any of the (up to four) adverbial ranges carries "neg".
########
mhbvadv(
  L("n x1"),
  L("n x2"),
  L("n x3"),
  L("n x4")
  )
{
if (L("n x1"))
  {
  L("a") = eltnode(L("n x1"));
  L("b") = lasteltnode(L("n x1"));
  L("neg") = attrinrange("neg",L("a"),L("b"));
  if (L("neg"))
    return 1;
  }
if (L("n x2"))
  {
  L("a") = eltnode(L("n x2"));
  L("b") = lasteltnode(L("n x2"));
  L("neg") = attrinrange("neg",L("a"),L("b"));
  if (L("neg"))
    return 1;
  }
if (L("n x3"))
  {
  L("a") = eltnode(L("n x3"));
  L("b") = lasteltnode(L("n x3"));
  L("neg") = attrinrange("neg",L("a"),L("b"));
  if (L("neg"))
    return 1;
  }
if (L("n x4"))
  {
  L("a") = eltnode(L("n x4"));
  L("b") = lasteltnode(L("n x4"));
  L("neg") = attrinrange("neg",L("a"),L("b"));
  if (L("neg"))
    return 1;
  }
return 0;
}

########
# FUNC: QCLAUSEVG
# SUBJ: A look at a clause's verb.
########
qclausevg(L("vg"),L("clause"))
{
if (!L("vg") || !L("clause"))
  return;
if (pnname(L("vg")) != "_vg")
  return;
L("v") = pnvar(L("vg"),"verb node");
L("dn") = pndown(L("vg"));
if (L("dn") != L("v"))
  return;  # Lone verb inside its vg.
if (vconjq(L("v"),"-ing"))
  pnreplaceval(L("clause"),"ellipted-that-is",1);
}

##############
## GETANCESTOR
## SUBJ: Get a desired ancestor node.
##############
getancestor(L("n"),L("name"))
{
# Traverse up till you find it.
while (L("n"))
  {
  L("nm") = pnname(L("n"));
  if (L("nm") == L("name"))  # Found ancestor with desired name.
    return L("n");
  if (pnup(L("n")))
    L("n") = pnup(L("n"));
  else
    {
    while (L("n") && pnprev(L("n")))  # To first node in list.
      L("n") = pnprev(L("n"));
    L("n") = pnup(L("n"));  # Up the tree.
    }
  }
return 0;
}

##############
## ALIGNNS
## SUBJ: Align node with string.
## RET:  1 if aligned with high confidence.
##############
alignNS(L("node"),L("str"))
{
if (!L("node") || !L("str"))
  return 0;
L("ntext") = nodetreetext(L("node"));
return align(L("ntext"),L("str"));
}

##############
## ALIGN
## SUBJ: Align two strings.
## RET:  1 if aligned with high confidence.
##       0 means could not align with confidence. (Words may still
##       represent the same thing.)
##############
align(L("str1"),
  L("str")  # The "good" string being aligned to.
  )
{
if (!L("str1") || !L("str"))
  return 0;
if (G("dev"))
  "align.txt" << L("str1") << " <=> " << L("str") << "\n";
L("cnf") = 0;
L("thresh") = 65;  # Minimum confidence for aligned match.
L("str1") = strtolower(L("str1"));
L("str") = strtolower(L("str"));
if (L("str1") == L("str"))
  {
  if (G("dev"))
    "align.txt" << " (same) MATCH" << "\n";
  return 1;
  }
# REMOVE SPACE TO "NORMALIZE".
L("str1") = strsubst(L("str1")," ",0);
L("str") = strsubst(L("str")," ",0);
if (L("str") == L("str1"))  # Equal except for spaces.
  {
  if (G("dev"))
    "align.txt" << " (spacing difference) MATCH" << "\n";
  return 1;
  }
L("len1") = strlength(L("str1"));
L("len") = strlength(L("str"));
# FIX: was absolute(L("len1") - L("len2")); L("len2") is never set, so the
# delta always equaled L("len1") and the containment guard below misfired.
L("del") = absolute(L("len1") - L("len"));
if (L("len1") <= 2 || L("len") <= 2)
  {
  if (G("dev"))
    "align.txt" << " Too short to tell." << "\n";
  return 0;  # Can't really tell anything.
  }
# Containment.
if (L("del") <= 3 && L("len1") >= 4)
  {
  if (strcontains(L("str1"),L("str")))
    {
    if (G("dev"))
      "align.txt" << " 1 contained in 2. MATCH" << "\n";
    return 1;
    }
  }
# USING LEVENSHTEIN.
L("minlen") = minimum(L("len1"),L("len"));
L("lev") = levenshtein(L("str1"),L("str"));
if (!L("minlen") || L("lev") > L("minlen"))
  {
  if (G("dev"))
    "align.txt" << " 0%" << "\n";
  return 0;
  }
L("x") = 100.0*flt(L("minlen") - L("lev"))/flt(L("minlen"));
if (G("dev"))
  "align.txt" << " lev,min,%=" << L("lev") << "," << L("minlen") << "," << L("x");
if (L("x") >= L("thresh"))
  {
  if (G("dev"))
    "align.txt" << " MATCH" << "\n";
  return 1;
  }
if (G("dev"))
  "align.txt" << " NO" << "\n";
return 0;

#### IGNORING THE REST FOR NOW.....
# (Unreachable legacy heuristics retained below for reference.)
if (L("len1") > L("len"))
  L("f") = flt(L("len1")) / flt(L("len"));
else
  L("f") = flt(L("len")) / flt(L("len1"));
if (L("f") > 1.6)
  return 0;
# Look at start and end match.
# Even if redundant letters, the order match is important.
# Todo: Longer prefix/suffix/infix for longer words.
L("pre2") = strpiece(L("str"),0,1);
L("pre1") = strpiece(L("str"),0,0);
if (strstartswith(L("str1"),L("pre2")))
  {
  if (L("len") >= 10)
    L("cnf") = L("cnf") %% 60;
  else if (L("len") >= 8)
    L("cnf") = L("cnf") %% 65;
  else if (L("len") >= 6)
    L("cnf") = L("cnf") %% 70;
  else if (L("len") == 5)
    L("cnf") = L("cnf") %% 75;
  else
    {
    if (G("dev"))
      "align.txt" << L("str1") << ", " << L("str") << "(same start) " << L("cnf") << "\n";
    return 1;  # Good enough.
    }
  }
else if (strstartswith(L("str1"),L("pre1")))
  {
  if (L("len") >= 10)
    L("cnf") = L("cnf") %% 50;
  else if (L("len") >= 8)
    L("cnf") = L("cnf") %% 55;
  else if (L("len") >= 6)
    L("cnf") = L("cnf") %% 60;
  else if (L("len") == 5)
    L("cnf") = L("cnf") %% 65;
  else if (L("len") == 4)
    L("cnf") = L("cnf") %% 68;
  else if (L("len") == 3)
    L("cnf") = L("cnf") %% 70;
  }
L("suf3") = strpiece(L("str"),L("len")-3,L("len")-1);
L("suf2") = strpiece(L("str"),L("len")-2,L("len")-1);
L("suf1") = strpiece(L("str"),L("len")-1,L("len")-1);
if (strendswith(L("str1"),L("suf3")))
  {
  if (L("len") >= 10)
    L("cnf") = L("cnf") %% 65;
  else if (L("len") >= 8)
    L("cnf") = L("cnf") %% 70;
  else if (L("len") >= 6)
    L("cnf") = L("cnf") %% 75;
  else
    {
    if (G("dev"))
      "align.txt" << L("str1") << ", " << L("str") << "(same end) " << L("cnf") << "\n";
    return 1;  # Good enough.
    }
  }
else if (strendswith(L("str1"),L("suf2")))
  {
  if (L("len") >= 10)
    L("cnf") = L("cnf") %% 60;
  else if (L("len") >= 8)
    L("cnf") = L("cnf") %% 65;
  else if (L("len") >= 6)
    L("cnf") = L("cnf") %% 70;
  else if (L("len") == 5)
    L("cnf") = L("cnf") %% 75;
  else
    {
    if (G("dev"))
      "align.txt" << L("str1") << ", " << L("str") << "(same suff) " << L("cnf") << "\n";
    return 1;  # Good enough.
    }
  }
else if (strendswith(L("str1"),L("suf1")))
  {
  if (L("len") >= 10)
    L("cnf") = L("cnf") %% 50;
  else if (L("len") >= 8)
    L("cnf") = L("cnf") %% 55;
  else if (L("len") >= 6)
    L("cnf") = L("cnf") %% 60;
  else if (L("len") == 5)
    L("cnf") = L("cnf") %% 65;
  else if (L("len") == 4)
    L("cnf") = L("cnf") %% 68;
  else if (L("len") == 3)
    L("cnf") = L("cnf") %% 70;
  }
if (L("cnf") >= L("thresh"))
  {
  if (G("dev"))
    "align.txt" << L("str1") << ", " << L("str") << "(quick) " << L("cnf") << "\n";
  return 1;
  }
# Try a simple compositional approach.
# Assume that anagram matches are nonexistent.
L("ii") = 0;
while (L("ii") < L("len1"))
  {
  L("c") = strpiece(L("str1"),L("ii"),L("ii"));
  L("o") = letord(L("c"));
  ++L("arr1")[L("o")];
  ++L("ii");
  }
L("ii") = 0;
while (L("ii") < L("len"))
  {
  L("c") = strpiece(L("str"),L("ii"),L("ii"));
  L("o") = letord(L("c"));
  ++L("arr")[L("o")];
  ++L("ii");
  }
# Now traverse and compare.
L("ii") = 1;
L("matches") = 0;
while (L("ii") <= 26)
  {
  L("matches") = L("matches") + minimum(L("arr1")[L("ii")],L("arr")[L("ii")]);
  ++L("ii");
  }
L("min") = L("len");  # The one we're aligning to.
L("percent") = 100 * L("matches") / L("min");
#"align.txt" << L("str1") << " | " << L("str")
#  << "\t" << L("matches") << "/" << L("min") << "\n";
L("cnf") = L("cnf") %% L("percent");
if (G("dev"))
  "align.txt" << L("str1") << ", " << L("str") << " " << L("cnf") << "\n";
if (L("cnf") >= L("thresh"))
  return 1;
return 0;
}

########
# FUNC: ARRAYMERGE
# SUBJ: Merge two arrays.
# NOTE: Only copying stuff if needed.
#   Does passing an array automatically copy it?
# WARN: CALLER MUST DECIDE IF 0 MEANS EMPTY ARRAY
#   AND ACT ACCORDINGLY.
########
arraymerge(L("arr1"),L("arr2"))
{
#if (!L("arr1"))
#  return L("arr2");
#if (!L("arr2"))
#  return L("arr1");
L("len1") = arraylength(L("arr1"));
L("len2") = arraylength(L("arr2"));
L("merge") = L("arr1");  # COPY IT.
L("ii") = 0;
while (L("ii") < L("len2"))
  {
  L("merge")[L("len1") + L("ii")] = L("arr2")[L("ii")];
  ++L("ii");
  }
return L("merge");
}

########
# FUNC: ARRAYREVERSE
# SUBJ: Reverse order of elements in an array.
# NOTE: Only copying stuff if needed.
########
arrayreverse(L("arr"))
{
if (!L("arr"))
  return 0;
L("len") = arraylength(L("arr"));
if (L("len") == 1)
  return L("arr");
L("ii") = 0;
L("jj") = L("len") - 1;
L("rev") = 0;
while (L("ii") < L("len"))
  {
  L("rev")[L("ii")] = L("arr")[L("jj")];
  ++L("ii");
  --L("jj");
  }
return L("rev");
}

########
# FUNC: PUSH
# SUBJ: Push value onto array.
########
push(L("val"),L("arr"))
{
if (!L("arr"))
  return L("val");
# Shift existing elements right by one; new value goes in slot 0.
L("len") = arraylength(L("arr"));
L("newarr")[0] = L("val");
L("ii") = 0;
while (L("ii") < L("len"))
  {
  L("newarr")[L("ii")+1] = L("arr")[L("ii")];
  ++L("ii");
  }
return L("newarr");
}

########
# FUNC: RPUSH
# SUBJ: Push value onto array end of array.
########
rpush(L("arr"),L("val"))
{
if (!L("arr"))
  return L("val");
L("len") = arraylength(L("arr"));
L("arr")[L("len")] = L("val");
return L("arr");
}

########
# FUNC: PUSHZ
# SUBJ: Push value onto array.
# NOTE: Treats zero as an array with one value.
########
pushz(L("val"),L("arr"))
{
#if (!L("arr"))
#  return L("val");
L("len") = arraylength(L("arr"));
L("newarr")[0] = L("val");
L("ii") = 0;
while (L("ii") < L("len"))
  {
  L("newarr")[L("ii")+1] = L("arr")[L("ii")];
  ++L("ii");
  }
return L("newarr");
}

########
# FUNC: RPUSHZ
# SUBJ: Push value onto array end of array.
# NOTE: Treats zero as an array with one value.
########
rpushz(L("arr"),L("val"))
{
#if (!L("arr"))
#  return L("val");
L("len") = arraylength(L("arr"));
L("arr")[L("len")] = L("val");
return L("arr");
}

######## PRINTCH
# SUBJ: Print given number of chars.
###################
printch(L("ch"),L("num"),L("out"))
{
if (!L("ch") || !L("num") || !L("out"))
  return;
while (L("num") > 0)
  {
  L("out") << L("ch");
  --L("num");
  }
}

######## RJX
# SUBJ: Right-justify something to print.
###################
rjx(
  L("item"),   # Item to print.
  L("field"),  # Length of field to print in.
  L("ch"),     # Leading char.
  L("out")
  )
{
if (!L("field") || !L("out"))
  return;
if (!L("ch"))
  L("ch") = " ";  # Default is space.
L("str") = str(L("item"));  # Must be forced to string.
L("len") = strlength(L("str"));
L("ii") = L("field") - L("len");
while (L("ii") > 0)
  {
  L("out") << L("ch");
  --L("ii");
  }
L("out") << L("str");
}

######## CTR
# SUBJ: Pretty-print centered in a field.
###################
ctr(
  L("item"),   # Item to print.
  L("field"),  # Length of field to print in.
  L("ch"),     # Surrounding chars.
  L("out")
  )
{
if (!L("field") || !L("out"))
  return;
if (!L("ch"))
  L("ch") = " ";  # Default is space.
L("str") = str(L("item"));  # Must be forced to string.
L("len") = strlength(L("str"));
# Figure out preceding chars.
L("pad") = L("field") - L("len");
L("pre") = L("pad")/2;
L("post") = L("pad") - L("pre");
L("ii") = L("pre");
while (L("ii") > 0)
  {
  L("out") << L("ch");
  --L("ii");
  }
L("out") << L("str");
if (!L("post"))
  return;
if (L("ch") == " ")
  return;  # Trailing spaces not needed.
L("ii") = L("post");
while (L("ii") > 0)
  {
  L("out") << L("ch");
  --L("ii");
  }
}

########
# FUNC: PNFINDCHILD
# SUBJ: Find child node by name.
# NOTE: Fetch the first one found.
########
pnfindchild(L("n"),L("name"))
{
if (!L("n") || !L("name"))
  return 0;
L("n") = pndown(L("n"));
if (!L("n"))
  return 0;
while (L("n"))
  {
  if (L("name") == pnname(L("n")))
    return L("n");
  L("n") = pnnext(L("n"));
  }
return 0;
}

########
# FUNC: KBCHILDRENTOARRAY
# SUBJ: Copy kb children to an array.
# RET:  Array.
########
kbchildrentoarray(
  L("parent")  # Parent concept.
  )
{
if (!L("parent"))
  return 0;
L("n") = down(L("parent"));
L("arr") = 0;
L("ii") = 0;
while (L("n"))
  {
  L("arr")[L("ii")] = L("n");
  ++L("ii");
  L("n") = next(L("n"));
  }
return L("arr");
}

##############
## LETORD
## RET: Return 1 for "a", 2 for "b", etc. 0 for non-alpha.
## ASS: Lowercase alphabetic letter.
##############
letord(L("c"))
{
if (L("c") == "a") return 1;
if (L("c") == "b") return 2;
if (L("c") == "c") return 3;
if (L("c") == "d") return 4;
if (L("c") == "e") return 5;
if (L("c") == "f") return 6;
if (L("c") == "g") return 7;
if (L("c") == "h") return 8;
if (L("c") == "i") return 9;
if (L("c") == "j") return 10;
if (L("c") == "k") return 11;
if (L("c") == "l") return 12;
if (L("c") == "m") return 13;
if (L("c") == "n") return 14;
if (L("c") == "o") return 15;
if (L("c") == "p") return 16;
if (L("c") == "q") return 17;
if (L("c") == "r") return 18;
if (L("c") == "s") return 19;
if (L("c") == "t") return 20;
if (L("c") == "u") return 21;
if (L("c") == "v") return 22;
if (L("c") == "w") return 23;
if (L("c") == "x") return 24;
if (L("c") == "y") return 25;
if (L("c") == "z") return 26;
return 0;
}

##############
## RANGEFEAT
## SUBJ: Check range of nodes for occurrences of a feature.
## RET:  1 if range of nodes has needed feature occurrences.
##############
rangefeat(
  L("start"),  # First node.
  L("end"),    # Last node.
  L("feat"),   # Name of feature.
  L("min")     # Minimum required occurrences for success.
  )
{
if (!L("start") || !L("end") || !L("feat"))
  return 0;
if (L("end"))
  L("end") = pnnext(L("end"));  # Right bound.
L("n") = L("start");
L("count") = 0;
while (L("n") && L("n") != L("end"))
  {
  if (pnvar(L("n"),L("feat")))
    ++L("count");
  L("n") = pnnext(L("n"));
  }
if (L("count") >= L("min"))
  return 1;
return 0;
}

##############
## RANGEFEATCOUNT
## SUBJ: Count occurrences of feature in range of nodes.
## RET:  num = count of nodes that have the feature.
## NOTE: For example, how many nodes in range have "neg"
##   or negation.
##############
rangefeatcount(
  L("start"),  # First node.
  L("end"),    # Last node.
  L("feat")    # Name of feature.
  )
{
if (!L("start") || !L("end") || !L("feat"))
  return 0;
if (L("end"))
  L("end") = pnnext(L("end"));  # Right bound.
L("n") = L("start");
L("count") = 0;
while (L("n") && L("n") != L("end"))
  {
  if (pnvar(L("n"),L("feat")))
    ++L("count");
  L("n") = pnnext(L("n"));
  }
return L("count");
}

##############
## PNCHILDSFEATCOUNT
## SUBJ: Count occurrences of feature in node's children.
## RET:  num = count of nodes that have the feature.
## NOTE: For example, how many nodes in range have "neg"
##   or negation.
##############
pnchildsfeatcount(
  L("node"),  # Node whose children we traverse.
  L("feat")   # Name of feature.
  )
{
if (!L("node") || !L("feat"))
  return 0;
L("n") = pndown(L("node"));
L("count") = 0;
while (L("n"))
  {
  if (pnvar(L("n"),L("feat")))
    ++L("count");
  L("n") = pnnext(L("n"));
  }
return L("count");
}

##############
## ARRAYOUT
## SUBJ: Print an array on a line.
## RET:
## KEY: printarray(), printarr()
##############
arrayout(
  L("arr"),
  L("out"),
  L("sep")  # Separator char.
  )
{
if (!L("arr") || !L("out"))
  return;
if (!L("sep"))
  {
  # Use the default, separate by whitespace char.
  L("out") << L("arr");
  return;
  }
L("len") = arraylength(L("arr"));
if (L("len") <= 0)
  return;
L("out") << L("arr")[0];
L("ii") = 1;
while (L("ii") < L("len"))
  {
  L("out") << L("sep") << L("arr")[L("ii")];
  ++L("ii");
  }
}

##############
## STRDEPUNCT
## SUBJ: Remove punctuation from a string.
## RET:
## NOTE: Should be an NLP++ function.
##############
strdepunct(L("str"))
{
if (!L("str"))
  return 0;
#"depx.txt" << L("str") << " <=> ";
L("str") = strsubst(L("str"),"'"," ");
# NOTE(review): the next literal is a mis-encoded character (likely a curly
# apostrophe); preserved byte-for-byte — confirm against the original file.
L("str") = strsubst(L("str"),"�"," ");  # 03/04/13 AM.
L("str") = strsubst(L("str"),"."," ");
L("str") = strsubst(L("str"),"-"," ");
L("str") = strsubst(L("str"),"_"," ");
L("str") = strsubst(L("str"),"#"," ");
L("str") = strsubst(L("str"),","," ");
L("str") = strsubst(L("str"),"("," ");
L("str") = strsubst(L("str"),")"," ");
L("str") = strsubst(L("str"),"/"," ");
L("str") = strsubst(L("str"),"\"",0);  # 03/04/13 AM.
L("str") = strclean(L("str"));
#"depx.txt" << L("str") << "\n";
return L("str");
}

##############
## LEFTATTR
## SUBJ: Look in parse subtree for leftmost node's attr.
## RET:  Value of leftmost node's attr.
##############
leftattr(
  L("n"),     # Root of subtree.
  L("attr")   # Name of attribute.
  )
{
if (!L("attr"))
  return 0;
L("val") = 0;
while (L("n") && !L("val"))
  {
  L("val") = pnvar(L("n"),L("attr"));
  L("n") = pndown(L("n"));
  }
return L("val");
}

##############
## RIGHTATTR
## SUBJ: Look in parse subtree for rightmost node's attr.
## RET:  Value of rightmost node's attr.
##############
rightattr(
  L("n"),     # Root of subtree.
  L("attr")   # Name of attribute.
  )
{
if (!L("n") || !L("attr"))
  return 0;
L("val") = pnvar(L("n"),L("attr"));
if (L("val"))
  return L("val");
L("n") = pndown(L("n"));
while (L("n") && !L("val"))
  {
  # Get rightmost node in chain.
  while (L("n"))
    {
    L("last") = L("n");
    L("n") = pnnext(L("n"));
    }
  L("val") = pnvar(L("last"),L("attr"));
  L("n") = pndown(L("last"));
  }
return L("val");
}

##############
## GETPOSITION
## SUBJ: Get position of a value in an array.
## RET:  position in given array, 0 based.
## NOTE: Assume an array of unique values. Find the first match.
##############
getposition(
  L("ii"),   # Positive integer.
  L("arr")   # Array
  )
{
if (!L("ii") || !L("arr"))
  return 0;
L("jj") = 0;
while (!L("done"))
  {
  L("val") = L("arr")[L("jj")];
  if (L("val") == L("ii"))
    return L("jj");
  if (!L("val"))
    return 0;  # Hit an empty slot: not found.
  ++L("jj");
  }
return 0;
}

##############
## ALPHACHARS
## SUBJ: Count alpha chars in a string.
## RET:  count.
## OPT: Highly inefficient.
##############
alphachars(L("str"))
{
if (!L("str"))
  return 0;
L("len") = strlength(L("str"));
while (L("ii") < L("len"))
  {
  L("ch") = strpiece(L("str"),L("ii"),L("ii"));
  if (strisalpha(L("ch")))
    ++L("chars");
  ++L("ii");
  }
return L("chars");
}

##############
## INITIALS
## SUBJ: Get initials of each word in given string.
## RET:  initials = string of initials, separated by space.
## NOTE: Eg, "John Smith" => returns "J S".
## May want an option for no space in returned string.
##############
initials(L("str"))
{
if (!L("str"))
  return 0;
# Split on spaces and take the first character of each piece.
L("x") = split(L("str")," ");
L("len") = arraylength(L("x"));
L("inits") = strpiece(L("x")[0],0,0);
++L("ii");
while (L("ii") < L("len"))
  {
  L("inits") = L("inits") + " " + strpiece(L("x")[L("ii")],0,0);
  ++L("ii");
  }
return L("inits");
}

##############
## PERCENTRND
## SUBJ: Percent rounded to n decimal places.
## RET:  string.
##############
percentrnd(
  L("top"),  # Numerator
  L("bot"),  # Denominator
  L("n")     # Decimal places
  )
{
# Handle one place only, for now.
L("n") = 1;
if (!L("top") || !L("bot"))
  return " 0%";
if (L("top") >= L("bot"))
  return " 100% ";
L("pct") = 100.0 * L("top") / L("bot");
L("h") = num(L("pct"));        # Whole part.
L("d") = L("pct") - L("h");    # Fractional part.
#if (L("h") < 10)
#  L("hs") = " " + str(L("h"));
#else
#  L("hs") = str(L("h"));
L("ds") = str(L("d"));
L("len") = strlength(L("ds"));
# Get rid of 0. part.
L("ds") = strpiece(L("ds"),2,L("len")-1);
L("len") = L("len") - 2;
L("ii") = L("len") - 1;
L("inc") = 0;  # Whether to increment current digit.
# Walk digits right-to-left, rounding as we go.
while (L("ii") > 0)
  {
  L("ch") = strpiece(L("ds"),L("ii"),L("ii"));
  L("cn") = num(L("ch"));
  if (L("inc"))
    ++L("cn");
  if (L("cn") >= 10)
    {
    L("cn") = 0;
    L("inc") = 1;
    }
  else if (L("cn") >= 5)
    L("inc") = 1;
  else
    L("inc") = 0;
  L("ch") = str(L("cn"));  # Convert rounded digit.
  if (L("ii") == L("n"))   # Got number of places.
    {
    # Compute and return.
    if (L("n") == 1 && L("ch") == "0" && L("inc"))
      {
      # Increment the whole number.
      ++L("h");
      if (L("h") >= 100)
        return "100% ";
      if (L("h") >= 10)
        return " " + str(L("h")) + ".0%";
      return str(L("h")) + ".0%";
      }
    if (L("n") == 1)
      {
      if (L("h") < 10)
        return " " + str(L("h")) + "." + L("ch") + "%";
      return str(L("h")) + "." + L("ch") + "%";
      }
    return "0";  # Not handled.
    }
  --L("ii");
  }
return str(L("pct"));
}

###########################################################
### THREENORM
# Subj: Normalize a numeric string to three or more places.
#   Add leading zeros as needed.
###########################################################
threenorm(L("str"))
{
if (!L("str"))
  return 0;
L("len") = strlength(L("str"));
if (L("len") == 0)
  return 0;
if (L("len") == 1)
  return "00" + L("str");
if (L("len") == 2)
  return "0" + L("str");
return L("str");
}

###########################################################
### TWONORM
# Subj: Normalize a numeric string to two or more places.
#   Add leading zeros as needed.
###########################################################
twonorm(L("str"))
{
if (!L("str"))
  return 0;
L("len") = strlength(L("str"));
if (L("len") == 0)
  return 0;
if (L("len") == 1)
  return "0" + L("str");
return L("str");
}

###########################################################
### PAD
# Subj: Create a padded string for given string, to fill out to given places.
###########################################################
PAD(
  L("str"),     # Given string to fill out.
  L("places"),  # Number of places to fill out to.
  L("ch")       # Character to use in filling.
  )
{
if (!L("places") || !L("ch"))
  return 0;
if (L("places") > 100)
  return 0;  # Too much nonsense.
if (!L("str"))
  L("len") = 0;
else
  L("len") = strlength(L("str"));
while (L("len") < L("places"))
  {
  L("ss") = L("ch") + L("ss");
  ++L("len");
  }
return L("ss");
}

###########################################################
### CONVALS
# SUBJ: Fetch a multi-valued con attribute from kb, as array.
# RET:  arr - array of found values.
# NOTE: valstoarray, arrayfromvals...
###########################################################
convals(
  L("con"),  # Concept to look in.
  L("key")   # Attribute key.
  )
{
if (!L("con") || !L("key"))
  return 0;
if (!(L("attr") = findattr(L("con"),L("key"))))
  return 0;
if (!(L("val") = attrvals(L("attr"))))
  return 0;
L("cons") = 0;  # Build array of cons.
L("ii") = 0;    # Count for array.
while (L("val"))
  {
  if (L("c") = getconval(L("val")))
    {
    L("cons")[L("ii")] = L("c");
    ++L("ii");
    }
  L("val") = nextval(L("val"));
  }
return L("cons");
}

###########################################################
### STRVALS
# SUBJ: Fetch a multi-valued string attribute from kb, as array.
# RET:  arr - array of found values.
# NOTE: valstoarray, arrayfromvals...
###########################################################
strvals(
  L("con"),  # Concept to look in.
  L("key")   # Attribute key.
  )
{
if (!L("con") || !L("key"))
  return 0;
if (!(L("attr") = findattr(L("con"),L("key"))))
  return 0;
if (!(L("val") = attrvals(L("attr"))))
  return 0;
L("cons") = 0;  # Build array of strings.
L("ii") = 0;    # Count for array.
while (L("val"))
  {
  if (L("c") = getstrval(L("val")))
    {
    L("cons")[L("ii")] = L("c");
    ++L("ii");
    }
  L("val") = nextval(L("val"));
  }
return L("cons");
}

###########################################################
### CONVALSMAX
# SUBJ: Fetch a multi-valued attribute from kb, as array.
# RET:  arr - array of found values.
# NOTE: If number of values > max, return 0.
###########################################################
convalsmax(
  L("con"),  # Concept to look in.
  L("key"),  # Attribute key.
  L("max")   # Maximum number of allowed values.
  )
{
if (!L("con") || !L("key"))
  return 0;
if (!(L("attr") = findattr(L("con"),L("key"))))
  return 0;
if (!(L("val") = attrvals(L("attr"))))
  return 0;
L("cons") = 0;  # Build array of cons.
L("ii") = 0;    # Count for array.
while (L("val"))
  {
  if (L("c") = getconval(L("val")))
    {
    L("cons")[L("ii")] = L("c");
    ++L("ii");
    }
  L("val") = nextval(L("val"));
  }
if (L("ii") > L("max"))
  L("cons") = 0;
return L("cons");
}

###########################################################
### NAMESTOLIST
# SUBJ: Convert list of name nodes to unique word array.
# RET:  arr - array of unique name words.
###########################################################
namestolist(
  L("pns"),    # Array of nodes.
  L("count")   # Array length.
  )
{
if (!L("pns") || !L("count"))
  return 0;
L("ii") = 0;
L("names") = 0;
while (L("ii") < L("count"))
  {
  L("n") = L("pns")[L("ii")];
  L("last") = pnvar(L("n"),"lastname text");
  if (L("last"))
    L("last") = strtolower(L("last"));
  L("names") = qaddstr(L("last"),L("names"));
  L("first") = pnvar(L("n"),"firstname text");
  if (L("first"))
    L("first") = strtolower(L("first"));
  L("names") = qaddstr(L("first"),L("names"));
  L("mid") = pnvar(L("n"),"middlename text");
  if (L("mid"))
    L("mid") = strtolower(L("mid"));
  L("names") = qaddstr(L("mid"),L("names"));
  ++L("ii");
  }
return L("names");
}

###########################################################
### UNIONCONS
# SUBJ: Do a union of two arrays of concepts.
# RET:  arr - array of unique values.
# NOTE: Not assuming that either of the arrays is unique values.
# OPT:  O(n-square)
###########################################################
unioncons(
  L("c1"),
  L("c2")
  )
{
if (!L("c1") && !L("c2"))
  return 0;
L("arr") = 0;  # New array.
# Build array from first.
L("len") = 0;
if (L("c1"))
  L("len") = arraylength(L("c1"));
L("ii") = 0;
while (L("ii") < L("len"))
  {
  L("c") = L("c1")[L("ii")];
  L("arr") = qaddvalue(L("c"),L("arr"));
  ++L("ii");
  }
# Build array from second.
L("len") = 0;
if (L("c2"))
  L("len") = arraylength(L("c2"));
L("ii") = 0;
while (L("ii") < L("len"))
  {
  L("c") = L("c2")[L("ii")];
  L("arr") = qaddvalue(L("c"),L("arr"));
  ++L("ii");
  }
return L("arr");
}

########
# FUNC: INTERSECT
# SUBJ: Intersection of two string-valued arrays.
# RET:  Array representing intersection.
# NOTE: Caller should make sure arrays are DEDUP'ed.
########
intersect(L("arr1"),L("arr2"))
{
if (!L("arr1") || !L("arr2"))
  return 0;
#L("arr1") = dedup(L("arr1"));
#L("arr2") = dedup(L("arr2"));
L("len1") = arraylength(L("arr1"));
#L("len2") = arraylength(L("arr2"));
L("ii") = 0;
L("newarr") = 0;
L("len") = 0;  # Length of new array.
while (L("ii") < L("len1"))
  {
  L("val") = L("arr1")[L("ii")];
  if (valinarray(L("val"),L("arr2")))
    {
    L("newarr")[L("len")] = L("val");  # Add new val.
    ++L("len");
    }
  ++L("ii");
  }
return L("newarr");
}

###########################################################
### INTERSECTCOUNT
# SUBJ: Count intersecting values in two arrays.
# RET:  num = number of matches.
# NOTE: Ignoring any redundant value issues.
###########################################################
intersectcount(
  L("a1"),
  L("a2")
  )
{
if (!L("a1") || !L("a2"))
  return 0;
L("len1") = arraylength(L("a1"));
L("ii") = 0;
while (L("ii") < L("len1"))
  {
  L("e") = L("a1")[L("ii")];
  if (valinarray(L("e"),L("a2")))
    ++L("matches");
  ++L("ii");
  }
return L("matches");
}

########
# FUNC: UNIONX
# SUBJ: Union of two string-valued arrays.
# RET:  Array representing union.
# NOTE: Caller should make sure arrays are DEDUP'ed.
# WARN: Compiling the word UNION conflicts with C++ type name.
########
unionx(L("arr1"),L("arr2"))
{
if (!L("arr1"))
  return L("arr2");
if (!L("arr2"))
  return L("arr1");
L("len1") = arraylength(L("arr1"));
L("len2") = arraylength(L("arr2"));
L("union") = L("arr1");  # COPY one array.
L("ii") = 0;
while (L("ii") < L("len2"))
  {
  L("val") = L("arr2")[L("ii")];
  if (!valinarray(L("val"),L("union")))
    {
    # Add to union.
    L("union")[L("len1")] = L("val");
    ++L("len1");
    }
  ++L("ii");
  }
return L("union");
}

########
# FUNC: DEDUP
# SUBJ: Remove duplicate values from an array.
# RET:  Uniqued array.
########
dedup(L("arr"))
{
if (!L("arr"))
  return 0;
L("len") = arraylength(L("arr"));
if (L("len") == 1)
  return L("arr");
L("dedup") = L("arr")[0];
L("len1") = 1;
L("ii") = 1;
while (L("ii") < L("len"))
  {
  L("val") = L("arr")[L("ii")];
  if (!valinarray(L("val"),L("dedup")))
    {
    # Add to array.
    L("dedup")[L("len1")] = L("val");
    ++L("len1");
    }
  ++L("ii");
  }
return L("dedup");
}

###########################################################
### VALINARRAY
# SUBJ: See if value is in array.
# RET:  1 if found, else 0.
########################################################### valinarray( L("val"), L("arr") ) { if (!L("val") || !L("arr")) return 0; L("len") = arraylength(L("arr")); while (L("ii") < L("len")) { L("e") = L("arr")[L("ii")]; if (L("e") == L("val")) return 1; ++L("ii"); } return 0; } ############# KB FUNS ########################################################### ### CWORDS # SUBJ: Build string from kb attribute having word concepts. ########################################################### cwords( L("con"), # Concept L("key") # Attribute name. ) { if (!L("con") || !L("key")) return 0; L("attr") = findattr(L("con"),L("key")); if (!L("attr")) return 0; L("val") = attrvals(L("attr")); # If there are attribute values, build string. L("str") = 0; while (L("val")) { L("c") = getconval(L("val")); # Word concept. if (L("str")) L("str") = L("str") + " " + conceptname(L("c")); else L("str") = conceptname(L("c")); L("val") = nextval(L("val")); } return L("str"); } ########################################################### ### WORDSTODICT # SUBJ: Build array of dict concepts for array of words. # RET: Array of dict concepts. ########################################################### wordstodict(L("words")) { if (!L("words")) return 0; L("len") = arraylength(L("words")); while (L("ii") < L("len")) { L("w") = L("words")[L("ii")]; if (L("w")) L("c") = dictfindword(L("w")); else L("c") = 0; L("arr")[L("ii")] = L("c"); ++L("ii"); } return L("arr"); } ########################################################### ### WORDSTOCONVALS # SUBJ: Build unique array of concept values from word lookup. ########################################################### wordstoconvals( L("words"), # Array of words. L("key") # Attribute key to lookup. 
) {
if (!L("words") || !L("key")) return 0;
L("len") = arraylength(L("words"));
L("ii") = 0;
L("cons") = 0;
#if (G("dev")) "xxx.txt" << "wordstoconvals: len=" << L("len") << "\n";
while (L("ii") < L("len"))
  {
  L("w") = strtolower(L("words")[L("ii")]);
  L("c") = dictfindword(L("w"));
#  if (L("c"))
#  if (G("dev")) "xxx.txt" << "got word=" << L("w") << "\n";
  L("vs") = convalsmax(L("c"),L("key"),9);
  L("cons") = unioncons(L("cons"),L("vs"));
  ++L("ii");
  }
return L("cons");
}

###########################################################
### UPDATEARRAY
# Subj: Catenate two arrays and return result.
# NOTE: Assume nonempty values.
#   Appends arr2's elements onto the end of arr1 in place.
###########################################################
updatearray(L("arr1"),L("arr2"))
{
if (!L("arr2")) return L("arr1");
if (!L("arr1")) return L("arr2");
L("len1") = arraylength(L("arr1"));
L("len2") = arraylength(L("arr2"));
L("ii") = L("len1");  # Start writing at end of arr1.
L("jj") = 0;
while (L("jj") < L("len2"))
  {
  # Add value.
  # OPT: Needs optimization.
  L("arr1")[L("ii")] = L("arr2")[L("jj")];
  ++L("ii");
  ++L("jj");
  }
return L("arr1");
}

###########################################################
### PNRPUSHUNIQ
# Subj: Push values onto node variable, assure unique values.
# RET: 1 if ok.
# NOTE: Assume nonempty values.
#   Adds only if unique.
#   concatenate arrays, merge arrays, fuse arrays.
#   union of arrays. (NOT intersection).
# NLP++: Should be a built-in function.
###########################################################
pnrpushuniq(L("n"),L("key"),L("arr"))
{
if (!L("n") || !L("key")) return 0;
if (!L("arr")) return 1;  # Nothing to add is still "ok".
L("len") = arraylength(L("arr"));
L("ii") = 0;
while (L("vv") = L("arr")[L("ii")])
  {
  # "xxxx.txt" << pnvar(L("n"),L("key")) << "\n";
  # "xxxx.txt" << "val=" << L("vv") << "\n";
  if (!valinarray(L("vv"),pnvar(L("n"),L("key")) ) )  # If not redundant.  # FIX. # 12/19/14 AM.
    pnrpushval(L("n"),L("key"),L("vv"));
  ++L("ii");
  }
return 1;  # FIX: was "return L("arr1");" -- L("arr1") is undefined in this
           # function (copy-paste from updatearray), so it always returned
           # empty. Per the RET note above, success returns 1.
}

###########################################################
### UPDATEARRAYUNIQ
# Subj: Catenate two arrays and return result.
# NOTE: Assume nonempty values.
#   Adds only if unique.
#   concatenate arrays, merge arrays, fuse arrays.
#   union of arrays. (NOT intersection).
# OPT: See pnpushuniq
#
###########################################################
updatearrayuniq(L("arr1"),L("arr2"))
{
if (!L("arr2")) return L("arr1");
if (!L("arr1")) return L("arr2");
L("len1") = arraylength(L("arr1"));
L("len2") = arraylength(L("arr2"));
L("ii") = L("len1");
L("jj") = 0;
while (L("jj") < L("len2"))
  {
  # OPT: Needs optimization (valinarray makes this O(n*m)).
  L("v") = L("arr2")[L("jj")];
  if (L("v") && !valinarray(L("v"),L("arr1")) )  # If not redundant.
    {
    L("arr1")[L("ii")] = L("arr2")[L("jj")];
    ++L("ii");
    }
  ++L("jj");
  }
return L("arr1");
}

###########################################################
### XFEREARRAY
# Subj: Update array from one node to another.
# NOTE: Assume nonempty values.
# RET: arr - the merged array.
###########################################################
xferarray(
  L("to"),     # Node to merge to.
  L("field"),  # Field to merge to.
  L("arr")     # Array to add in.
  )
{
if (!L("to")) return 0;
L("x") = pnvar(L("to"),L("field"));
if (!L("field") || !L("arr")) return L("x");
pnreplaceval(L("to"),L("field"), updatearray(L("x"),L("arr")) );
L("x") = pnvar(L("to"),L("field"));
return L("x");
}

###########################################################
### XFEREARRAYUNIQ
# Subj: Update array from one node to another.
# NOTE: Assume nonempty values.
#   ADD IF UNIQUE VALUES.
# OLD: RET: arr - the merged array.
# RET: [OPT] 1 if ok, else 0.
# OPT: Caller can fetch from the node if needs the array.
#
###########################################################
xferarrayuniq(
  L("to"),     # Node to merge to.
  L("field"),  # Field to merge to.
  L("arr")     # Array to add in.
  )
{
if (!L("to")) return 0;
L("x") = pnvar(L("to"),L("field"));
if (!L("field") || !L("arr")) return 1;  # Already there. # 12/17/14 AM.
pnreplaceval(L("to"),L("field"),updatearrayuniq(L("x"),L("arr")) );
#L("x") = pnvar(L("to"),L("field"));
#return L("x");
return 1;  # [OPT] # 12/17/14 AM.
}

##############
## CONVERTWORDNUM
## SUBJ: Get numeric value of a "number word".
## RET: int.  0 for unrecognized words (ambiguous with "zero").
##############
convertwordnum(
  L("str")  # Single word for now.
  )
{
if (!L("str")) return 0;  # what about "ZERO".
L("str") = str(L("str"));  # Make sure it's a string.
L("str") = strtolower(L("str"));
if (L("str") == "one") return 1;
if (L("str") == "two") return 2;
if (L("str") == "three") return 3;
if (L("str") == "four") return 4;
if (L("str") == "five") return 5;
if (L("str") == "six") return 6;
if (L("str") == "seven") return 7;
if (L("str") == "eight") return 8;
if (L("str") == "nine") return 9;
if (L("str") == "ten") return 10;
if (L("str") == "eleven") return 11;
if (L("str") == "twelve") return 12;
if (L("str") == "thirteen") return 13;
if (L("str") == "fourteen") return 14;
if (L("str") == "fifteen") return 15;
if (L("str") == "sixteen") return 16;
if (L("str") == "seventeen") return 17;
if (L("str") == "eighteen") return 18;
if (L("str") == "nineteen") return 19;
if (L("str") == "twenty") return 20;
if (L("str") == "thirty") return 30;
if (L("str") == "forty") return 40;
if (L("str") == "fifty") return 50;
if (L("str") == "sixty") return 60;
if (L("str") == "seventy") return 70;
if (L("str") == "eighty") return 80;
if (L("str") == "ninety") return 90;
if (L("str") == "hundred") return 100;
if (L("str") == "thousand") return 1000;
if (L("str") == "million") return 1000000;  # FIX: was 100000 (missing a zero).
return 0;
}

###########################################################
### NODEARRAYADD
# SUBJ: Add a value to an array in a node.
# NOTE: Convenience function.
###########################################################
nodearrayadd(
  L("node"),
  L("nm"),   # Name of array variable.
  L("len"),  # Length of array.
L("val")  # Value to add at end.
  )
{
if (!L("node") || !L("nm")) return;
L("arr") = pnvar(L("node"),L("nm"));
# Caller supplies current length; no bounds check is done here.
L("arr")[L("len")] = L("val");
pnreplaceval(L("node"),L("nm"),L("arr"));
}

##############
## NODEARRAYADDUNIQ
## SUBJ: Add one value to an array on a node.
## RET:
## LIKE pnaddval(), pnaddvaluniq().
## NLP++: NEED PROPER NLP++ FUNCTION FOR THIS.
##############
nodearrayadduniq(
  L("x"),      # Node with array.
  L("field"),  # Name of array variable.
  L("val")
  )
{
if (!L("x") || !L("field") || !L("val")) return;
L("arr") = pnvar(L("x"),L("field"));
if (!L("arr"))  # Var not there yet.
  {
  pnreplaceval(L("x"),L("field"),L("val"));
  return;
  }
# Traverse array to the end.
# If value not already present, add it.
L("ii") = 0;
while (L("v") = L("arr")[L("ii")] )
  {
  if (L("v") == L("val"))
    return;  # Found matching value.
  ++L("ii");
  }
# Reached end, didn't find matching value.
L("arr")[L("ii")] = L("val");
# Place array back into node.
pnreplaceval(L("x"),L("field"),L("arr"));
}

###########################################################
### CLEANTEXT
# Subj: Clean extra space etc.
# NOTE(review): the three space-to-space substitutions below appear to
# collapse odd whitespace variants (possibly non-breaking-space bytes)
# into a plain space -- verify the literal bytes against the original file.
###########################################################
cleantext(L("str"))
{
if (!L("str")) return 0;
L("str") = strsubst(L("str"),"\""," ");
L("str") = strsubst(L("str"),","," ");
L("str") = strsubst(L("str")," "," ");
L("str") = strsubst(L("str")," "," ");
L("str") = strsubst(L("str")," "," ");
return L("str");
}

##############
## ROMANTONUM
## RET: Return 1 for "I", 2 for "II", etc. 0 for non-roman.
## NOTE: Handles a fixed table of numerals only (no general roman parsing).
##############
romantonum(
  L("str")  # Roman numberal.
  )
{
if (!L("str")) return 0;
# L("str") = str(L("str"));  # Make sure it's a string.
L("str") = strtoupper(L("str"));
if (L("str") == "I") return 1;
if (L("str") == "II") return 2;
if (L("str") == "III") return 3;
if (L("str") == "IV") return 4;
if (L("str") == "V") return 5;
if (L("str") == "VI") return 6;
if (L("str") == "VII") return 7;
if (L("str") == "VIII") return 8;
if (L("str") == "IX") return 9;
if (L("str") == "X") return 10;
if (L("str") == "XI") return 11;
if (L("str") == "XII") return 12;
if (L("str") == "XIII") return 13;
if (L("str") == "XIV") return 14;
if (L("str") == "XV") return 15;
if (L("str") == "XVI") return 16;
if (L("str") == "XVII") return 17;
if (L("str") == "XVIII") return 18;
if (L("str") == "XIX") return 19;
if (L("str") == "XX") return 20;
if (L("str") == "XXX") return 30;
if (L("str") == "XL") return 40;
if (L("str") == "L") return 50;
if (L("str") == "LX") return 60;
if (L("str") == "LXX") return 70;
if (L("str") == "LXXX") return 80;
if (L("str") == "XC") return 90;
if (L("str") == "C") return 100;
if (L("str") == "M") return 1000;
return 0;
}

########
# FUNC: CONCEPTPATHARRAY
# SUBJ: Build array for a concept's path.
# NOTE: For finding meeting point of two concept paths, etc.
#   arr[0] is the given concept; each next element is its parent via up().
########
conceptpatharray(L("cc"))
{
if (!L("cc")) return 0;
L("arr")[0] = L("cc");
L("ii") = 0;  # Index.
while (L("cc") = up(L("cc")) )
  {
  L("arr")[++L("ii")] = L("cc");
  }
return L("arr");
}

########
# FUNC: MATCHARRAYS
# SUBJ: Traverse arrays till elements don't match.
# NOTE: For finding meeting point of two concept paths, etc.
#   L("ii"), L("mm"), L("done") are intentionally uninitialized (start as 0).
#   Returns the last matching element, or 0 if the arrays differ at [0].
########
matcharrays(L("aa1"),L("aa2"))
{
if (!L("aa1") || !L("aa2")) return 0;
# Traverse.
while (!L("done"))
  {
  L("aa") = L("aa1")[L("ii")];
  if (!L("aa"))
    return L("mm");  # DONE.
  L("bb") = L("aa2")[L("ii")];
  if (!L("bb"))
    return L("mm");  # DONE.
  if (L("aa") == L("bb"))
    {
    L("mm") = L("aa");  # MATCHED ELEMENT.
    ++L("ii");
    }
  else
    return L("mm");  # DONE.
  }
return L("mm");
}

########
# FUNC: INHIERARCHY
# SUBJ: See if given concept is under a concept by name.
########
# Walks up from con via up(), comparing each ancestor's name to parent_str.
inhierarchy(L("con"),L("parent_str"))
{
if (!L("parent_str")) return 0;  # false.
while (L("con"))
  {
  if (conceptname(L("con")) == L("parent_str"))
    return 1;  # true.
  L("con") = up(L("con"));
  }
return 0;  # false.
}

########
# FUNC: INHIER
# SUBJ: See if given concept is under a concept.
#   Same as inhierarchy() but compares concept identity, not name.
########
inhier(L("con"),L("parent_con"))
{
#"hier.txt" << "inhier:" << "\n";
if (!L("con") || !L("parent_con")) return 0;  # false.
while (L("con"))
  {
  # "hier.txt" << conceptname(L("con"))
  # << "," << conceptname(L("parent_con")) << "\n";
  if (L("con") == L("parent_con"))
    return 1;  # true.
  L("con") = up(L("con"));
  }
return 0;  # false.
}

########
# FUNC: PNADDVALORIG
# SUBJ: Add value to a node's variable.
# EX: pnaddval(N(2),"hello",2,"newstr");
# NOTE: For adding multiple values to a variable.
#   Caller supplies the position (ord); no bounds check.
########
pnaddvalorig(
  L("node"),  # Node we are adding info to.
  L("field"),
  L("ord"),
  L("val")  # Taking zero also.
  )
{
if (!L("node") || !L("field")) return;
L("vals") = pnvar(L("node"),L("field"));
# Not checking on array length.
# Can't directly append a new value onto node.
# Need something like pnaddval(L("node"),L("field"),L("str")).
L("vals")[L("ord")] = L("val");
pnreplaceval(L("node"),L("field"),L("vals"));
}

########
# FUNC: PNADDVAL
# MOVE. # RENAME.
#
# SUBJ: Add value to a node's variable.
# EX: pnaddstr(N(2),"hello","newval");
# NOTE: For adding multiple values to a variable.
#   Appends at the current end of the value array.
# MOVE. RENAME from pnaddstr().
#
# NLP++: PROPER NLP++ FUNCTION FOR THIS SORELY LACKING.
########
pnaddval(
  L("node"),  # Node we are adding info to.
  L("field"),
  L("val")
  )
{
if (!L("node") || !L("field") || !L("val")) return;
L("vals") = pnvar(L("node"),L("field"));
if (!L("vals"))
  L("len") = 0;
else
  L("len") = arraylength(L("vals"));
# Can't directly append a new value onto node.
# Need something like pnaddval(L("node"),L("field"),L("str")).
L("vals")[L("len")] = L("val");
pnreplaceval(L("node"),L("field"),L("vals"));
}

########
# FUNC: PNADDVALOPT
# RENAME.
#
# SUBJ: Add value to a node's variable.
# EX: pnaddval(N(2),"hello",2,"newstr");
# NOTE: For adding multiple values to a variable.
#   Duplicate of pnaddvalorig() above.
# RENAME from pnaddval()
#
########
pnaddvalopt(
  L("node"),  # Node we are adding info to.
  L("field"),
  L("ord"),
  L("val")  # Taking zero also.
  )
{
if (!L("node") || !L("field")) return;
L("vals") = pnvar(L("node"),L("field"));
# Not checking on array length.
# Can't directly append a new value onto node.
# Need something like pnaddval(L("node"),L("field"),L("str")).
L("vals")[L("ord")] = L("val");
pnreplaceval(L("node"),L("field"),L("vals"));
}

########
# FUNC: LANGUAGEISO
# SUBJ: Return ISO 639-1 code for an English language name.
# EX: languageiso("French") -> "FR".  0 if unrecognized.
########
languageiso(L("str"))
{
if (!L("str")) return 0;
L("str") = strtoupper(L("str"));
if (L("str") == "ENGLISH") return "EN";
if (L("str") == "FRENCH") return "FR";
if (L("str") == "GERMAN") return "DE";
if (L("str") == "SPANISH") return "ES";
if (L("str") == "ITALIAN") return "IT";
if (L("str") == "PORTUGUESE") return "PT";
if (L("str") == "SWEDISH") return "SV";
if (L("str") == "DANISH") return "DA";
if (L("str") == "FINNISH") return "FI";
if (L("str") == "NORWEGIAN") return "NO";
if (L("str") == "INDONESIAN") return "ID";
if (L("str") == "DUTCH") return "NL";
return 0;
}

########
# FUNC: LANGUAGEFMISO
# SUBJ: Given ISO code, return English language name (inverse of languageiso).
# EX: languagefmiso("fr") -> "FRENCH".  0 if unrecognized.
########
languagefmiso(L("str"))
{
if (!L("str")) return 0;
L("str") = strtoupper(L("str"));
if (L("str") == "EN") return "ENGLISH";
if (L("str") == "FR") return "FRENCH";
if (L("str") == "DE") return "GERMAN";
if (L("str") == "ES") return "SPANISH";
if (L("str") == "IT") return "ITALIAN";
if (L("str") == "PT") return "PORTUGUESE";
if (L("str") == "SV") return "SWEDISH";
if (L("str") == "DA") return "DANISH";
if (L("str") == "FI") return "FINNISH";
if (L("str") == "NO") return "NORWEGIAN";
if (L("str") == "ID") return "INDONESIAN";
if (L("str") == "NL") return "DUTCH";
return 0;
}

########
# FUNC: STRTOKENIZE
# SUBJ: Tokenize string into an array.
# RET: Array, with each element holding a token.
# EX: strtokenize("abc
# OPT: Brutally clunky and inefficient.
# TODO: Implement as NLP++ internal function.
#   Tokens: maximal alpha runs, maximal digit runs, single punct chars.
#   Whitespace is skipped and only terminates the current token.
########
strtokenize(
  L("str")
  )
{
if (!L("str")) return 0;
L("len") = strlength(L("str"));  # Yes, length of string.
L("arr") = 0;   # Array to build, one elt per token.
L("ii") = 0;    # Char offset in string.
L("jj") = 0;    # Array element being built.
L("tok") = 0;   # String being built for current token.
L("type") = 0;  # Token type being worked on.
                # a = alpha, n = num, 0 = punct, white, or none.
while (L("ii") < L("len"))
  {
  L("ch") = strpiece(L("str"),L("ii"),L("ii"));
  if (strisalpha(L("ch")) )
    {
    if (L("type") == "a")  # Continue alpha.
      L("tok") = L("tok") + L("ch");
    else if (L("type") == "n")
      {
      L("arr")[L("jj")] = L("tok");  # Save number.
      ++L("jj");
      L("tok") = L("ch");
      }
    else
      L("tok") = L("ch");  # Start alpha.
    L("type") = "a";  # Working on alpha.
    }
  else if (strisdigit(L("ch")) )
    {
    if (L("type") == "n")  # Continue num.
      L("tok") = L("tok") + L("ch");
    else if (L("type") == "a")
      {
      L("arr")[L("jj")] = L("tok");  # Save alpha.
      ++L("jj");
      L("tok") = L("ch");
      }
    else
      L("tok") = L("ch");  # Start num.
    L("type") = "n";  # Now working on num.
    }
  # NLP++: where is striswhite(), Amnon ?
  else if (L("ch") == " " || L("ch") == "\t" || L("ch") == "\n" || L("ch") == "\r")
    {
    if (L("type") == "a" || L("type") == "n")
      {
      L("arr")[L("jj")] = L("tok");  # Save token.
      ++L("jj");
      }
    # This function skips over whitespace.
    L("tok") = 0;
    L("type") = 0;
    }
  # NLP++: where is strispunct(), Amnon ?
  else  # Assume punct
    {
    if (L("type") == "a" || L("type") == "n")
      {
      L("arr")[L("jj")] = L("tok");  # Save token.
      ++L("jj");
      }
    # Save each punct char as single token.
    L("arr")[L("jj")] = L("ch");
    ++L("jj");
    L("tok") = 0;
    L("type") = 0;
    }
  ++L("ii");  # Next offset in string.
  }
# Save the last token if need be.
if (L("type") == "a" || L("type") == "n")
  {
  L("arr")[L("jj")] = L("tok");  # Save token.
  ++L("jj");
  }
return L("arr");
}

########
# FUNC: PNSTRPRETTY
# SUBJ: Grab a node's text for pretty printing.
# EX:
# NOTE: Limit length of string grabbed.
#  Node may have a huge length (eg root of parse tree
#  node. So limit to something like 50 chars, or this
#  becomes a big bottleneck for analyser speed.
#  Appends " ..." when truncated; result is strclean()ed.
########
pnstrpretty(
  L("n")  # Parse tree node.
  )
{
if (!L("n")) return 0;
L("MAX") = G("MAX");  # MAX STRING LENGTH TO GRAB.
# BOTTLENECK. CLAUSE LENGTH. May be huge.
L("s") = pnvar(L("n"),"$ostart");  # Start offset in input buffer.
L("e") = pnvar(L("n"),"$oend");    # End offset in input buffer.
if (L("e") - L("s") > L("MAX"))
  {
  L("e") = L("s") + L("MAX");
  L("str") = inputrange(L("s"),L("e")) + " ...";
  }
else
  L("str") = inputrange(L("s"),L("e"));
if (!L("str"))
  return L("str");
return strclean(L("str"));
#return L("str");
}

########
# FUNC: PNSTRMAX
# SUBJ: Grab a node's text, delimited length.
# EX:
# NOTE: Limit length of string grabbed.
#  Node may have a huge length (eg root of parse tree
#  node. So limit to something like 50 chars, or this
#  becomes a big bottleneck for analyser speed.
# NOTE: Uses original text from input buffer.
#  Unlike pnstrpretty(): no " ..." suffix, no strclean().
########
pnstrmax(
  L("n"),    # Parse tree node.
  L("MAX")   # Maximum size string to fetch.
  )
{
if (!L("n") || !L("MAX")) return 0;
# BOTTLENECK. CLAUSE LENGTH. May be huge.
L("s") = pnvar(L("n"),"$ostart");  # Start offset in input buffer.
L("e") = pnvar(L("n"),"$oend");    # End offset in input buffer.
if (L("e") - L("s") > L("MAX"))
  L("e") = L("s") + L("MAX");
L("str") = inputrange(L("s"),L("e"));
return L("str");
}

##############
## SETNODEVARCHAIN
## SUBJ: Set nodevar, chaining down.
## RET:
## TODO: Add to NLP++ functions.
## EX: setnodevarchain(N(2),"NOSP",N("NOSP",1));
##  Sets var on the node and on every first-child descendant via pndown().
##############
setnodevarchain(
  L("n"),    # Node to set.
  L("var"),  # Varname to set. can be 0.
  L("val")   # Value to set.
  )
{
if (!L("n") || !L("var"))
  return;  # Error...
while (L("n"))
  {
  pnreplaceval(L("n"),L("var"),L("val"));
  L("n") = pndown(L("n"));
  }
}

############################
# FN: MIRROR
# CR: (FOR TAI)
# RET: FULL PATH FOR OUTPUT FILE.
# NOTE: MODIFY TO RETURN OUTPUT FILE.
#  OUTPUT NAME BASED ON INPUT FILE HEAD.
#  Could specify as an argument to mirror() call.
############################
mirror()
{
L("FULLDIR") = mirrordir("data",1);  # Create leaf folder. # 03/18/12 AM.

# Now name and output file.
L("id") = G("$inputhead");  # OUTPUT NAMED USING NON-TAIL PART OF INPUT FILE.
L("fn") = L("FULLDIR") + "\\" + L("id") + ".txt";
#if (G("verbose")) "output.txt" << "final file=" << L("fn") << "\n";

# NLP++: May mangle newlines and other chars, use system call below.
# OPT: System call below more efficient, better than inputrangetofile().
#L("s") = pnvar(pnroot(),"$ostart");
#L("e") = pnvar(pnroot(),"$oend");
#L("o") = openfile(L("fn"),0);  # OVERWRITE.
#inputrangetofile(L("s"),L("e"),L("o"));
#closefile(L("o"));
#L("o") = 0;

# COPY INPUT FILE TO MIRRORED OUTPUT PATH (plus DEV### folder).
#system("COPY /Y " + G("$input") + " " + L("fn"));

return L("fn");  # RETURN OUTPUT FILE PATH AND NAME TO CALLER.
}

############################
# FN: MIRRORDIR
# CR: (FOR TAI)
# RET: FULL PATH FOR LAST DIRECTORY OF OUTPUT FILE.
# NOTE: Split from MIRROR.
#  Mirrors the input path's folders (below the folder named "input")
#  under <apppath>\<outfolder>\FOLDERS, creating directories as it goes.
############################
mirrordir(
  L("outfolder"),   # "data" or "output" # 02/28/14 AM.
  L("lastdirflag")  # 1 if create the leaf folder, else 0. # 03/18/12 AM.
  )
{
if (!L("outfolder"))  # 02/28/14 AM.
  L("outfolder") = "output";  # RECOVER. # 02/28/14 AM.

if (G("verbose"))
  "mirror.txt" << "IN MIRROR:" << "\n";

# Make a "root" output folder on disk.
L("FULLDIR") = G("$apppath")
  + "\\" + L("outfolder")  # 02/28/14 AM.
  + "\\FOLDERS";
mkdir(L("FULLDIR"));

# MAPPING OF INPUT FOLDERS TO OUTPUT FOLDERS.
L("arr") = G("ARR FOLDERS");
L("len") = G("ARR FOLDERS LEN");

#if (G("MODE") != "mirror")
#  {
#  # Or just exit or do something else, if you have other modes, etc.
#  "error.txt" << "MODE=" << G("MODE") << " not implemented." << "\n";
#  G("FAIL") = "MIRROR_MODE";
#  return;
#  }

# MIRROR INPUT FOLDERS.
# Traverse to find folder called "input" in array.
L("ii") = 0;
L("ii input") = 0;
L("done") = 0;
while (!L("done"))
  {
  L("dir") = L("arr")[L("ii")];
  L("lcdir") = strtolower(L("dir"));  # Lowercase.
  if (L("lcdir") == "input")
    {
    L("ii input") = L("ii");  # Found the folder called "input".
    ++L("done");
    }
  ++L("ii");
  if (L("ii") >= L("len"))
    ++L("done");
  }
if (!L("ii input"))
  {
  "error.txt" << "Failed to find input folder in path." << "\n";
  G("FAIL") = "NO_INPUT_FOLDER";
  return;
  }
# FIX: was "if (!L("ii input")+1 >= L("len"))".
# "!" binds tighter than "+", so the old test evaluated (!x)+1 == 1,
# which can never reach len here -- the guard below could never fire.
if (L("ii input")+1 >= L("len"))
  {
  "error.txt" << "No folders under input folder. Unhandled" << "\n";
  G("FAIL") = "NO_INPUT_SUBFOLDER";
  return;
  }

# From below input folder to end of path, mirror the input folders.
# G("DIR") = Global that tracks full filename path for output.
# L("con") = Tracks the concept representing last folder in the path.
L("ii") = L("ii input") + 1;
while (L("ii") < L("len"))
  {
  L("dir") = L("arr")[L("ii")];
  L("FULLDIR") = L("FULLDIR") + "\\" + L("dir");
  # If making leaf folder, or not at leaf folder yet.
  # if (L("lastdirflag") || (L("ii") < (L("len")-1) ))  # 03/18/12 AM.
  mkdir(L("FULLDIR"));
  ++L("ii");
  }

# Ok, we should be at bottom folder of input path.
#if (G("verbose")) "output.txt" << "gdir=" << L("FULLDIR") << "\n";

### MOVED STUFF FROM HERE TO mirror().

return L("FULLDIR");  # 03/18/12 AM.
}

##############
## PRETTYNODES
## SUBJ: Pretty print array of nodes.
## RET:
## NOTE: A debugging aid.  No-op unless G("dev") is set.
##############
prettynodes(L("arr"),L("o"))
{
if (!G("dev")) return;
if (!L("arr") || !L("o")) return;
L("o") << "\n" << "prettynodes:" << "\n";
L("len") = arraylength(L("arr"));
while (L("ii") < L("len"))
  {
  L("x") = L("arr")[L("ii")];
  L("o") << L("ii") << ": " << pnname(L("x"))
    << "\t" << nodetreetext(L("x")) << "\n";
  ++L("ii");
  }
}

########
# FUNC: LOOKUPINTERNAL
# SUBJ: Lookup a string in given hierarchy.
# RET: con = last con in phrase is found.
# EX: L("con") = lookupinternal("PS3",G("phrases"));
########
lookupinternal(
  L("str"),
  L("croot")  # Root of "phrase tree" in kb.
  )
{
if (!L("str") || !L("croot")) return 0;
L("arr") = strtokenize(L("str"));
if (!L("arr")) return 0;
# Traverse down the phrase tree, one token per level.
L("ii") = 0;
L("done") = 0;
L("con") = L("croot");
while (!L("done"))
  {
  L("txt") = L("arr")[L("ii")];
  if (L("txt"))
    L("con") = findconcept(L("con"),L("txt"));
  else if (L("con") && L("con") != L("croot") )  # 05/05/12 AM.
    return L("con");  # Found a concept.
  else
    return 0;
  if (!L("con")) return 0;
  ++L("ii");
  }
}

########
# FUNC: STRTOPHRASE
# SUBJ: Make/get a phrase by traversing down given kb hierarchy.
#   Creates missing concepts (getconcept), uppercasing each token.
# RET: [0] last = last con in phrase.
#      [1] first = first concept in phrase.
# EX: L("con") = makeemptyphrase("PS3",G("phrases"));
########
strtophrase(
  L("str"),
  L("croot")  # Root of "phrase tree" in kb.
  )
{
if (!L("str") || !L("croot")) return 0;
L("arr") = strtokenize(L("str"));
if (!L("arr")) return 0;
# Traverse down the phrase tree.
L("ii") = 0;
L("done") = 0;
L("con") = L("croot");
L("firstc") = 0;  # First concept created. # 06/12/12 AM.
while (!L("done"))
  {
  L("txt") = L("arr")[L("ii")];
  if (L("txt"))
    {
    L("utxt") = strtoupper(L("txt"));
    L("con") = getconcept(L("con"),L("utxt"));
    if (!L("firstc"))
      L("firstc") = L("con");  # 06/12/12 AM.
    }
  else if (L("firstc"))  # 06/12/12 AM.
  # else if (L("con")
  #  && L("con") != L("croot") )
    {
    L("a")[0] = L("con");     # leaf - last concept. # 06/12/12 AM.
    L("a")[1] = L("firstc");  # First concept. # 06/12/12 AM.
    return L("a");  # Found a concept. Return last/first.
    }
  else
    return 0;
  if (!L("con")) return 0;
  ++L("ii");
  }
return 0;  # 06/12/12 AM.
}

########
# FUNC: STRTODEPUNCTPHRASE
# SUBJ: Make/get a phrase by traversing down given kb hierarchy.
#   Like strtophrase() but depunctuates first and enforces a minimum
#   token count.
# RET: [0] last = last con in phrase.
#      [1] first = first concept in phrase.
# EX: L("arr") = strtodepunctphrase("abc.def",G("phrases"),2);
########
strtodepunctphrase(
  L("str"),
  L("croot"),  # Root of "phrase tree" in kb.
  L("min")     # Min length to accept.
  )
{
if (!L("str") || !L("croot")) return 0;
L("x") = strdepunct(L("str"));
if (!L("x")) return 0;
L("arr") = strtokenize(L("x"));
if (!L("arr")) return 0;
L("len") = arraylength(L("arr"));
if (L("len") < L("min"))
  return 0;
# Traverse down the phrase tree.
L("ii") = 0;
L("done") = 0;
L("con") = L("croot");
L("firstc") = 0;  # First concept created. # 06/12/12 AM.
while (!L("done"))
  {
  L("txt") = L("arr")[L("ii")];
  if (L("txt"))
    {
    L("utxt") = strtoupper(L("txt"));
    L("con") = getconcept(L("con"),L("utxt"));
    if (!L("firstc"))
      L("firstc") = L("con");  # 06/12/12 AM.
    }
  else if (L("firstc"))  # 06/12/12 AM.
  # else if (L("con")
  #  && L("con") != L("croot") )
    {
    L("a")[0] = L("con");     # leaf - last concept. # 06/12/12 AM.
    L("a")[1] = L("firstc");  # First concept. # 06/12/12 AM.
    return L("a");  # Found a concept. Return last/first.
    }
  else
    return 0;
  if (!L("con")) return 0;
  ++L("ii");
  }
return 0;  # 06/12/12 AM.
}

########
# FUNC: PRETTYSTRS
# SUBJ: Pretty print an array of strings, one "[i] value" per line.
########
prettystrs(
  L("arr"),
  L("ind"),  # Indent string.
  L("o")     # Output stream.
  )
{
if (!L("arr") || !L("o")) return;
L("len") = arraylength(L("arr"));
L("ii") = 0;
while (L("ii") < L("len"))
  {
  if (L("ind"))
    L("o") << L("ind");
  L("o") << "[" << L("ii") << "] ";
  L("o") << L("arr")[L("ii")] << "\n";
  ++L("ii");
  }
}

########
# FUNC: PRETTYPNS
# SUBJ: Pretty print an array of parse tree nodes (name + subtree text).
########
prettypns(
  L("arr"),
  L("ind"),  # Indent string.
  L("o")     # Output stream.
  )
{
if (!L("arr") || !L("o")) return;
L("len") = arraylength(L("arr"));
L("ii") = 0;
while (L("ii") < L("len"))
  {
  L("n") = L("arr")[L("ii")];
  if (L("ind"))
    L("o") << L("ind");
  L("o") << "[" << L("ii") << "] ";
  L("o") << pnname(L("n")) << "\t" << nodetreetext(L("n")) << "\n";
  ++L("ii");
  }
}

########
# FUNC: ARRTOCOMMASTR
# SUBJ: Convert an array to comma-separated string.
########
arrtocommastr(
  L("arr")
  )
{
if (!L("arr")) return;
L("len") = arraylength(L("arr"));
L("ii") = 0;
L("str") = 0;
while (L("ii") < L("len"))
  {
  if (!L("str"))
    L("str") = L("arr")[L("ii")];
  else
    L("str") = L("str") + "," + L("arr")[L("ii")];
  ++L("ii");
  }
return L("str");
}

########
# FUNC: PNFINDATTRDOWN
# SUBJ: Find attr starting from current node.
# NOTE: Fetch the first one found.
#  Look at first nodes down the tree.
########
pnfindattrdown(L("n"),L("field"))
{
if (!L("n") || !L("field")) return 0;
# Look down.
while (L("n"))
  {
  if (L("x") = pnvar(L("n"),L("field")) )
    return L("x");
  L("n") = pndown(L("n"));
  }
return 0;
}

########
# FUNC: PNRMVAL
# SUBJ: Remove val from node's list.
# RET: 1 if found in list, else 0.
# NLP++: Useful as a built-in (todo).
########
pnrmval(
  L("n"),    # Node with list.
  L("key"),  # Field or attr name.
  L("val")   # Attr value to remove.
  )
{
if (!L("n") || !L("key") || !L("val")) return 0;
L("list") = pnvar(L("n"),L("key"));
if (!L("list")) return 0;
L("ii") = 0;
L("flag") = 0;  # Flag if found.
L("lnew") = 0;  # New list.
while (L("x") = L("list")[L("ii")] )
  {
  if (L("x") != L("val"))
    {
    # Not the desired item. Add to new list.
    # OPT: N-squared, could use a pnpush() NLP++ fun (todo).
    L("lnew") = addvalue(L("x"),L("lnew"));
    }
  else
    L("flag") = 1;  # FIX: flag was never set, so the function always
                    # returned 0 and never stored the trimmed list.
  ++L("ii");
  }
if (!L("flag"))
  return 0;
# Replace with modified list.
pnreplaceval(L("n"),L("key"),L("lnew"));
return 1;
}

########
# FUNC: RMLISTVAL
# SUBJ: Remove val from given list.
# RET: new list with val removed (0/empty inputs return 0).
#  (Comment fixed: previously claimed "1 if found".)
# NLP++: Useful as a built-in (todo).
########
rmlistval(
  L("list"),  # List.
  L("val")    # Attr value to remove.
  )
{
if (!L("list") || !L("val")) return 0;
L("ii") = 0;
L("flag") = 0;  # Unused; kept for interface stability.
L("lnew") = 0;  # New list.
while (L("x") = L("list")[L("ii")] )
  {
  if (L("x") != L("val"))
    {
    # Not the desired item. Add to new list.
    # OPT: N-squared, could use a pnpush() NLP++ fun (todo).
    L("lnew") = addvalue(L("x"),L("lnew"));
    }
  ++L("ii");
  }
return L("lnew");
}

########
# FUNC: RMLISTVALCASE
# SUBJ: Remove str val from given list, case insensitive.
# RET: new list with val removed (0/empty inputs return 0).
#  (Comment fixed: previously claimed "1 if found".)
# NLP++: Useful as a built-in (todo).
########
rmlistvalcase(
  L("list"),  # List.
  L("val")    # Attr value to remove.
  )
{
if (!L("list") || !L("val")) return 0;
L("uval") = strtoupper(L("val"));
L("ii") = 0;
L("flag") = 0;  # Unused; kept for interface stability.
L("lnew") = 0;  # New list.
while (L("x") = L("list")[L("ii")] )
  {
  L("ux") = strtoupper(L("x"));
  if (L("ux") != L("uval"))
    {
    # Not the desired item. Add to new list.
    # OPT: N-squared, could use a pnpush() NLP++ fun (todo).
    L("lnew") = addvalue(L("x"),L("lnew"));
    }
  ++L("ii");
  }
return L("lnew");
}

########
# FUNC: FINDLASTNODE
# SUBJ: Find last node in given list.
# RET: 1 if found in list, else 0.
# NLP++: Useful as a built-in (todo).
# NOTE: Like lasteltnode(), but that one requires rule element context.
#  last node in range. last range node lastrangenode.
########
findlastnode(
  L("n")
  )
{
if (!L("n")) return 0;
while (L("x") = pnnext(L("n")) )
  {
  L("n") = L("x");
  }
return L("n");
}

########################################################################

@CODE
L("hello") = 0;
if (!G("MAX"))
  G("MAX") = 256;  # LIMIT NODE TEXT.
G("LEN") = 0;  # LIMIT NODE TEXT.
@@CODE
@NODES _LINE @POST # Made $length cover multiple nodes. # #xaddlen("nindent", 2); X("nindent") = X("nindent") + N("$length",2); # 06/04/00 AM. singler(2,2); # 10/09/99 AM. @RULES # Count indentation separately. Doesn't add to nonwhite blob count. _whtINDENT [base] <- _xSTART _xWHITE [s plus]@@ # Recording long blobs of whitespace now. # @POST #xinc("nblobs") ++X("nblobs"); # 06/04/00 AM. single(); # 10/09/99 AM. @RULES # Note: Blobs = nonwhite regions of text in a line. # Note: counting end of line to get the right blob count. _whtSEP [base] <- _xWHITE [s min=5 max=0] @@ # Because of variable spacing in text regions, allowing up to 3 # whitespace to be a normal word separation. # @POST # xinc("nblobs") ++X("nblobs"); # 06/04/00 AM. @RULES _xNIL <- _xWHITE [s min=1 max=3] @@ @PRE <1,1> uppercase() @POST ++X("wallcaps"); # For Dave. # 11/30/99 AM. ++X("wcap"); # So nothing will change. # 11/30/99 AM. @RULES _xNIL <- _xALPHA [s] @@ @POST # xinc("wcap") # Num of capitalized words. ++X("wcap"); # 06/04/00 AM. @RULES _xNIL <- _xCAP [s] @@
@NODES _ROOT @POST G("filename") = N("pos") + ".dict"; G("filename") << N("word") << " pos=" << N("pos") << "\n"; @RULES _xNIL <- _LINE @@
@NODES _ROOT @CODE G("wordArr"); G("wordArrLen") = 0; @@CODE @POST L("word") = findconcept(G("words"), strtolower(N("$text", 1))); if ( L("word") ) { replaceval(L("word"), "count", numval(L("word"), "count") + 1); } else { L("newWord") = makeconcept(G("words"), strtolower(N("$text", 1))); addnumval(L("newWord"), "count", 1); G("wordArr")[G("wordArrLen")] = L("newWord"); ++G("wordArrLen"); } @RULES _xNIL <- _xWILD [matches=(_xALPHA) except=( the be to of and a in that have I it for not on with he as you do at this but his by from they we say her she or an will my one all would there their what so up out if about who get which go me when make can like time no just him know take people into year your good some could them see other than then now look only come its over think also back after use two how our work well way even new want because any these give day most us )] @@
@NODES _LINE @PRE <3,3> lowercase(); @POST X("word") = N("$text",3); "words.txt" << N("$text",3) << "\n"; @RULES _xNIL <- _xSTART ### (1) \{ ### (2) _xWILD [plus match=(_xALPHA \_ \-)] ### (3) @@
@PATH _ROOT _textZone _headerZone _LINE @POST X("lang") = N("lang"); @RULES _xNIL <- _langauge @@ @POST if (X("pessoa",3)) { X("pessoa2",3) = N("$text",1); } else { X("pessoa",3) = N("$text",1); } X("meaning",3) = 1; @RULES _xNIL <- _pessoa @@ @POST X("numero",3) = X("numero",4); X("meaning",3) = 1; @RULES _xNIL <- _numero @@ @POST X("tempo",3) = X("tempo",4); X("meaning",3) = 1; @RULES _xNIL <- _tempo @@ @POST X("root",3) = X("root",4); X("meaning",3) = 1; @RULES _xNIL <- _root @@ @POST X("stem",3) = N("$text",6); X("verb",3) = N("$text",8); IncrementCount(G("conjugations"),"count"); AddUniqueCon(G("conjugations"),N("$text",8)); single(); @RULES _conjug <- _xWILD [match=(\{)] ### (1) conj ### (2) \/ ### (3) _xALPHA ### (4) \| ### (5) _xALPHA ### (6) \| ### (7) _xALPHA ### (8) _xWILD [match=(\})] ### (9) @@
@NODES _ROOT @POST if (N("type")) { G("dict") << " " << N("type") << "=" << QuoteIfNeeded(strtolower(N("text"))); } @RULES _xNIL <- _info ### (1) @@
@NODES _LINE @RULES # Ex: minor\_concentration _minorKey <- minor [s] _xWHITE [star s] concentration [s] @@ # Ex: minored _minorKey <- minored [s] @@ # Ex: minors _minorKey <- minors [s] @@ # Ex: minoring _minorKey <- minoring [s] @@ # Ex: minor _minorKey <- minor [s] @@ # Ex: concentration _minorKey <- concentration [s] @@
# Built two layers of the same thing, so get rid of one. @NODES _ROOT @POST oldsplice(1,1) # 07/19/02 AM. @RULES _xNIL <- _RULES @@ _xNIL <- _CODE @@ _xNIL <- _DECL @@ # 12/19/01 AM. _xNIL <- _PRES @@ _xNIL <- _CHECKS @@ _xNIL <- _POSTS @@
# Count nouns that have a non-zero feature (i.e. node variable) @PRE <1,1> var("mass"); @POST ++G("count mass nouns"); @RULES _xNIL <- _noun @@
# Pass: parse XML DTD element declarations (content models) and record
# the "Grammar" attribute on the element's concept under G("Elements").
@NODES _ROOT

@POST
  S("buf") = "Grammar" ;
  S("Grammar") = str(N("$text",3)) ;
  G("CurrentConcept") = findconcept(G("Elements"),N("ElementName",1)) ;
  # Only add the Grammar attribute once per element concept.
  G("attr") = findattr(G("CurrentConcept"),S("buf")) ;
  if (G("attr") == 0 ) {
    addattr(G("CurrentConcept"),S("buf")) ;
    addstrval(G("CurrentConcept"),S("buf"),S("Grammar")) ;
  }
  # Mark the concept as not-yet-sorted for a later pass.
  G("attrName") = "sortedYet" ;
  G("attr") = findattr(G("CurrentConcept"),G("attrName")) ;
  if (G("attr") == 0 ) {
    addattr(G("CurrentConcept"),G("attrName")) ;
    addsval(G("CurrentConcept"),G("attrName"),0) ;
  }
  single() ;
@@POST

@RULES

# <!ELEMENT name EMPTY|ANY >
_ElementDecl <-
  _ElementDeclStart [one] ### (1)
  _whiteSpace [opt] ### (2)
  _xWILD [star matches=("EMPTY" "ANY")] ### (3)
  _whiteSpace [opt] ### (4)
  _EndTag [one] ### (5)
  @@

# <!ELEMENT name (#PCDATA ...) >
_ElementDecl <-
  _ElementDeclStart [one] ### (1)
  _whiteSpace [opt] ### (2)
  _Mixed [one] ### (3)
  _whiteSpace [opt] ### (4)
  _EndTag [one] ### (5)
  @@

# <!ELEMENT name (a|b)? >
_ElementDecl <-
  _ElementDeclStart [one] ### (1)
  _whiteSpace [opt] ### (2)
  _choice [one] ### (3)
  _xWILD [opt matches=("?" "*" "+")] ### (4)
  _whiteSpace [opt] ### (5)
  _EndTag [one] ### (6)
  @@

# <!ELEMENT name (a,b)? >
_ElementDecl <-
  _ElementDeclStart [one] ### (1)
  _whiteSpace [opt] ### (2)
  _seq [one] ### (3)
  _xWILD [opt matches=("?"
  "*" "+")] ### (4)
  _whiteSpace [opt] ### (5)
  _EndTag [one] ### (6)
  @@

@@RULES

@RULES

# Sequence content models: ( cp , ... ) with optional occurrence suffix.
_seq <-
  \( [one] ### (1)
  _whiteSpace [opt] ### (2)
  _cp [one] ### (3)
  _seqElement [plus] ### (4)
  _whiteSpace [opt] ### (5)
  \) [one] ### (6)
  _xWILD [opt matches=("*" "+" "?")] ### (7)
  @@

_seq <-
  \( [one] ### (1)
  _whiteSpace [opt] ### (2)
  _choice [one] ### (3)
  _seqElement [plus] ### (4)
  _whiteSpace [opt] ### (5)
  \) [one] ### (6)
  _xWILD [opt matches=("*" "+" "?")] ### (7)
  @@

_seq <-
  \( [one] ### (1)
  _whiteSpace [opt] ### (2)
  _seq [one] ### (3)
  _seqElement [plus] ### (4)
  _whiteSpace [opt] ### (5)
  \) [one] ### (6)
  _xWILD [opt matches=("*" "+" "?")] ### (7)
  @@

# ", cp" continuation inside a sequence.
_seqElement <-
  _whiteSpace [opt] ### (1)
  \, [one] ### (2)
  _whiteSpace [opt] ### (3)
  _choice [one] ### (4)
  @@

# Choice content models: ( cp | ... ) with optional occurrence suffix.
_choice <-
  \( [one] ### (1)
  _whiteSpace [opt] ### (2)
  _cp [one] ### (3)
  _choiceElement [star] ### (4)
  _whiteSpace [opt] ### (5)
  \) [one] ### (6)
  _xWILD [opt matches=("*" "+" "?")] ### (7)
  @@

_choice <-
  \( [one] ### (1)
  _whiteSpace [opt] ### (2)
  _choice [one] ### (3)
  _choiceElement [plus] ### (4)
  _whiteSpace [opt] ### (5)
  \) [one] ### (6)
  _xWILD [opt matches=("*" "+" "?")] ### (7)
  @@

_choice <-
  \( [one] ### (1)
  _whiteSpace [opt] ### (2)
  _seq [one] ### (3)
  _choiceElement [plus] ### (4)
  _whiteSpace [opt] ### (5)
  \) [one] ### (6)
  _xWILD [opt matches=("*" "+" "?")] ### (7)
  @@

# "| cp" continuation inside a choice.
_choiceElement <-
  _whiteSpace [opt] ### (1)
  \| [one] ### (2)
  _whiteSpace [opt] ### (3)
  _seq [one] ### (4)
  @@

_seqElement <-
  _whiteSpace [opt] ### (1)
  \, [one] ### (2)
  _whiteSpace [opt] ### (3)
  _seq [one] ### (4)
  @@

_choiceElement <-
  _whiteSpace [opt] ### (1)
  \| [one] ### (2)
  _whiteSpace [opt] ### (3)
  _choice [one] ### (4)
  @@

@@RULES
# Fetch the word following the given word in the KB dictionary hierarchy. L("return_con") = dictnext(L("con"));
@NODES _LINE @PRE <1,1> varz("common"); @RULES _header <- _xALPHA ### (1) experience ### (2) @@ @PRE <2,2> vareq("resume","header"); @RULES _header <- _xSTART ### (1) _xALPHA ### (2) _xEND ### (3) @@
@NODES _ROOT @POST "verb.txt" << N("$text",5) << "\n"; single(); @RULES _header <- \< ### (1) h ### (2) 2 ### (3) \> ### (4) _xALPHA ### (5) Conjugation ### (6) \: ### (7) Present ### (8) Tense ### (9) \< ### (10) \/ ### (11) h ### (12) 2 ### (13) \> ### (14) @@ @RULES _tbody <- \< ### (1) tbody ### (2) \> ### (3) @@ @RULES _tbodyClose <- \< ### (1) \/ ### (2) tbody ### (3) \> ### (4) @@
# Sort an array of concepts by comparing an attribute named "count", of numeric type, in ascending order G("sorted concepts") = sortconsbyattr(G("unsorted concepts"),"count",1,0); G("unsorted concepts") = 0; # Explicitly remove the unsorted array.
# GetKBPathString: joins the names on a KB concept path (skipping the first
# elements and trimming the quote characters around each name) into a phrase.
# GetICDTerms: walks parse-tree nodes, matching successive (lowercased) words
# against a KB subtree and collecting the phrases found.
# NOTE(review): L("icd_section") is never assigned in this function, so the
# "top" branch appears unreachable and G("icd_d") is always used — confirm
# whether it was meant to be a parameter.
@DECL # Join KB path concept names to form phrase GetKBPathString(L("path")) { L("pathArray") = split(L("path"), " "); L("len") = arraylength(L("pathArray")); L("i") = 4; L("string") = L("pathArray")[3]; L("string") = strpiece(L("string"), 1, strlength(L("string"))-2); while (L("i") < L("len")) { L("string") = L("string") + " " + strpiece(L("pathArray")[L("i")], 1, strlength(L("pathArray")[L("i")])-2); L("i") = L("i") + 1; } return L("string"); } GetICDTerms(L("node"), L("icd_split")) { # Stores phrases encountered L("completePhrases"); while (L("node")) { # Get text from pn node L("text") = pnvar(L("node"), "$text"); # If text is a word, convert to lowercase if (strisalpha(L("text"))) { L("text") = strtolower(L("text")); } L("term"); L("found") = 0; L("KBIter"); # Look up successive words in subtree until we reach leaf if (strtolower(L("icd_section")) == "top") { L("KBIter") = findconcept(G("mimic_split_codes") , L("text")); } else { L("KBIter") = findconcept(G("icd_d"), L("text")); } L("word") = L("node"); while (L("KBIter") && L("word")) { ## Store term, if it's terminal #if (findconcept(L("KBIter"), " # L("path") = conceptpath(L("KBIter")); # # L("term") = GetKBPathString(L("path")); # # L("found") = 1; # L("node") = L("word"); #} L("path") = conceptpath(L("KBIter")); L("term") = GetKBPathString(L("path")); # Advance to next word L("word") = pnnext(L("word")); L("wordText") = pnvar(L("word"), "$text"); L("KBIter") = findconcept(L("KBIter"), L("wordText")); } # Add longest term to list of terms found #if (L("found")) { if (arraylength(L("completePhrases")) == 1 && !L("completePhrases")) { L("completePhrases") = L("term"); } else { L("completePhrases")[arraylength(L("completePhrases"))] = L("term"); } #} L("node") = pnnext(L("node")); } return L("completePhrases"); } @@DECL
# Matches whole lines of the form "=== title ===" and records the inner text
# on the grandparent context node as "header" before reducing to _header.
@NODES _LINE @POST X("header",2) = N("$text",3); single(); @RULES _header <- _xSTART ### (1) _xWILD [plus match=(\=)] ### (2) _xWILD [plus fail=(\=)] ### (3) _xWILD [plus match=(\=)] ### (4) _xEND ### (5) @@
# Groups a _header and everything up to the next _header (or end of input)
# into a _headerZone, copying the header value onto the new zone node.
@NODES _ROOT @POST S("header") = N("header",1); single(); @RULES _headerZone <- _header ### (1) _xWILD [fail=(_header _xEND)] ### (2) @@
# Inside an education zone, segments lines into _educationInstance nodes:
# an instance starts at the zone start, at an _educationBoundary, or at an
# _eduStart, and runs until the next _eduStart.
@PATH _ROOT _educationZone @RULES _educationInstance <- _xWILD [s one match=( _xSTART _educationBoundary )] _xWILD [s star match=(_LINE _BLANKLINE) except=(_eduStart)] @@ _educationInstance <- _eduStart [s] _xWILD [s star match=(_LINE _BLANKLINE) except=(_eduStart)] @@
# Randomly permute N integers 0 to N-1, such that none are in normal order.
# Demonstrates permuten(): prints the identity ordering, then a derangement of it.

@CODE
"output.txt" << "0 1 2 3 4" << "\n";
"output.txt" << permuten(5) << "\n";
@@CODE

# FIX: sample output below was left uncommented after @@CODE, which is not
# valid pass-file content; it is now comment text.
# Outputs something like:
# 0 1 2 3 4
# 3 0 4 1 2
# Reduces "<name>" or "</name>" to a _tag node, storing the tag name in S("tag").
# NOTE(review): "[optional]" is used here where the rest of this code base writes "[opt]" — confirm both spellings are accepted.
@NODES _ROOT @POST S("tag") = N("$text",3); single(); @RULES _tag <- \< ### (1) \/ [optional] ### (2) _xALPHA ### (3) \> ### (4) @@
# Inside a bibliography item, collects any run of nodes that is not markup
# (dash, refs, spacing, formatting, newline) into a _text node tagged type="text".
@PATH _ROOT _bibItem @POST S("text") = N("$text",3); S("type") = "text"; single(); @RULES _text <- _xWILD [s plus fails=(_dash _bibItem _bibRef _spacing _text _bold _italics _emph _NEWLINE)] ### (3) @@
# No-op match on the literal word "the" (a _xNIL rule changes nothing; likely a
# placeholder or debugging hook).
@NODES _ROOT @RULES _xNIL <- the ### (1) @@
# Configuration pass for the POS tagger: sets global feature flags (verbosity,
# HTML handling, pretagged scoring, XML output modes, bracketing, Treebank
# conformance), initializes per-document scoring concepts when input is
# pretagged, initializes semantic handling, and appends a run entry to a work
# log. NOTE(review): the Linux branch assigns the log filename string directly
# to L("out") as a stream workaround — confirm closefile() tolerates it.
@CODE L("hello") = 0; G("LINUX") = 0; # G("LINUX") = 1; # If on Linux system. # Workaround: use relative or hardwired filenames on Linux for now. # No #IFDEF in NLP++ # # If verbose outputs. G("verbose") = 1; # If error outputs. G("error") = 1; # If highlighting passes. # If not interactive (VisualText session), no point # in running highlighting passes. # NLP++: Want to know if Toggle Highlight Mode is on in VT. G("hilite") = 1; # 10/25/10 AM. #if (!interactive()) # # G("hilite") = 0; # # If expecting and zapping HTML/XML input. G("find html") = 1; # !!!!! if (G("find html")) # Some html setup. { } # Flag allcaps text. # This could also be sleuthed by the analyzer and checked # on a local basis (eg, group of five all-cap words). G("allcap") = 0; # If dealing with pretagged input texts. G("pretagged") = 0; # !!!!!! # Confidence threshold for outputting tags 0 # For now, using 0 to "guess" if nothing has been assigned. # Note: Computing confidence is not implemented. # CONFIGURE: TAGGER GUESSES WHEN UNSURE. # #G("threshold") = 70; # G("threshold") = 0; # 09/24/13 AM. # If gathering global data on rule performance. G("posacct") = 0; if (!G("pretagged")) G("posacct") = 0; if (G("posacct")) posacctini(); # If printing out an embedded tagged output file. # (Assuming input is plain text.) G("xml recursive") = 0; # If printing out xml tags only (excluding embedded text). G("xml tags only") = 0; # If bracketing noun phrases. G("bracket") = 1; # If printing a treebank header. G("treebank") = 0; # If pos tagging to conform as well as possible to Treebank # assignments that I disagree with. G("conform treebank") = 0; # If printing filenames with mismatch diagnostic output. G("mismatch verbose") = 1; if (G("pretagged")) { # Set up scoring. G("scorepos") = getconcept(findroot(),"scorepos"); # Reset scoring for current document. replaceval(G("scorepos"),"currgood",0); replaceval(G("scorepos"),"currtot",0); # Accumulate for total document set. 
# allgood # alltot # Collect single-word mismatches. # L("fname") = G("$apppath") + "\\data\\mismatch.txt"; G("mismatch out") = openfile(L("fname"),"app"); # Collect single-word zeros. # L("fname") = G("$apppath") + "\\data\zero.txt"; G("zero out") = openfile(L("fname"),"app"); } #### Initialize some general semantic handling in KB. G("semantic processing") = 1; if (G("semantic processing")) seminit(); # See semfuns pass. ## To help track development time. G("score date") = 1; # If outputting date with score. # Track runs of the analyzer. G("worklog") = 0; if (interactive()) G("worklog") = 1; if (G("worklog")) { # Worklog goes to data\worklog.txt if (!G("LINUX")) { L("file") = G("$apppath") + "\\data\\worklog.txt"; L("out") = openfile(L("file"),"app"); } else { L("file") = "../data/worklog.txt"; # L("out") = openfile(L("file"),"app"); L("out") = L("file"); # workaround. } L("out") << "[" << today() << "]\t[" << G("$inputhead") << "]\n"; closefile(L("out")); } @@CODE # DEV NOTE: _adjc will be a useful concept for adj constituent # analogous to _np and _vg.
# Collapses one or more _RECURSE nodes into a single base _RECURSES node,
# registering them with the rule-file analyzer via rfarecurses().
@POST rfarecurses(1) single() @RULES _RECURSES [base] <- _RECURSE [plus] @@
# Scores each capitalized phrase (_Caps) on experience-zone lines with two
# weighted confidences — "job conf" and "company conf" — from counts that
# earlier passes stored on the node (lengths, root/modifier hits, unknowns),
# then applies penalties for missing evidence and single-word degenerate cases.
# The rule itself is _xNIL: only node variables are updated.
# Look only at cap phrases within lines within experience zones. @PATH _ROOT _experienceZone _LINE @POST N("job conf",1) = N("len",1) + N("caplen",1) + N("jobtitleroots",1) * 3 + N("end jobtitleroot",1) * 10 ; N("company conf",1) = N("len",1) + N("caplen",1) + N("unknowns",1) * 10 # 12/09/99 AM. + N("companyroots",1) * 8 + N("companymods",1) * 10 # 12/07/99 AM. + N("companymodroots",1) * 10 # 12/07/99 AM. + N("end companymodroot",1) * 16 # 12/07/99 AM. + N("end companyroot",1) * 20 ; # If no appropriate words, reduce confidence. if (!N("jobtitleroots",1) || N("unknowns",1)) N("job conf",1) = N("job conf",1) - 15; if (!N("companyroots",1) && !N("companymodroots",1) && !N("companymods",1) && !N("unknowns",1) # 12/09/99 AM. ) N("company conf",1) = N("company conf",1) - 30; # If a single word like "LTD", forget it. if (N("len",1) == 1) # Single-word company name. { if (N("companyroots",1) || N("companymods",1) || N("companymodroots",1) ) N("company conf",1) = -99; else if (!N("unknown",1)) # Some known word. N("company conf",1) = 0; # Threshold if on anchor line. } # noop() # Implicit. @RULES _xNIL <- _Caps [s] @@
# Inside a _CHECKS region, gathers one or more _STMTS into a base _CHECKS node
# and registers them via rfaactions(); earlier _ACTION-based variant kept commented.
@PATH _ROOT _CHECKS _NLPPP @POST rfaactions(1) single() @RULES #_CHECKS [base] <- _ACTION [star] @@ _CHECKS [base] <- _STMTS [plus] @@
# Resets the "kb" concept under the KB root by removing all of its children
# (getconcept creates it if absent).
@CODE G("kb") = getconcept(findroot(),"kb"); rmchildren(G("kb")); @@CODE
# Caches (creating if needed) the four top-level KB concepts used by later
# passes: codes, words, lookups, and pos.
@CODE G("codes") = getconcept(findroot(),"codes"); G("words") = getconcept(findroot(),"words"); G("lookups") = getconcept(findroot(),"lookups"); G("pos") = getconcept(findroot(),"pos"); @@CODE
# Flattens adjacent _adjs pairs: splice(1,2) replaces both matched nodes with
# their children, removing the _adjs wrappers from the parse tree.
# Remove the _adjs nodes from the parse tree, replacing them with their children @POST splice(1,2); @RULES _xNIL <- _adjs _adjs @@
# XML baseElements pass: tokenizes the smallest syntactic pieces of an XML
# file — entity escapes (&amp; &lt; &gt; &apos; &quot;), comment/CDATA/PI
# delimiters, DOCTYPE and markup-declaration starts (ELEMENT, ATTLIST,
# NOTATION, ENTITY), character and entity references — while recording the
# document type, its Entities and Elements under a tmp/gram KB subtree.
###############################################
# FILE: XML baseElements.pat # # SUBJ: Collect the smallest syntactic pieces # # of an XML file ... starts and ends of # # tags, entity references, and the like # # AUTH: Paul Deane # # CREATED: 11/Jan/01 # DATE OF CURRENT VERSION: 31/Aug/01 # # Copyright ############################################### ############################################### # CONTENT INDEX # # 1. Rules for special items like ampersands # # greater than etc. plus tag elements # # 2. Doctype declaration # # 3. Signals for special tag types including # # comments and entity references # ############################################### @CODE G("root") = findroot() ; G("tmp") = getconcept(G("root"),"tmp"); G("gramtab") = getconcept(G("tmp"),"gram") ; #in case someone is so thoughtless as not to specify a doc type G("DocTypeName") = "XML"; G("EntityName") = "Entities"; G("ElementName") = "Elements"; G("CurrentDocType") = findconcept(G("gramtab"),G("DocTypeName")) ; if (G("CurrentDocType") == 0 ) { makeconcept(G("gramtab"),G("DocTypeName")) ; G("CurrentDocType") = findconcept(G("gramtab"),G("DocTypeName")) ; } G("Entities") = findconcept(G("CurrentDocType"),G("EntityName")) ; if (G("Entities") == 0 ) { makeconcept(G("CurrentDocType"),G("EntityName")) ; G("Entities") = findconcept(G("CurrentDocType"),G("EntityName")) ; } G("Elements") = findconcept(G("CurrentDocType"),G("ElementName")) ; if (G("Elements") == 0 ) { makeconcept(G("CurrentDocType"),G("ElementName")) ; G("Elements") = findconcept(G("CurrentDocType"),G("ElementName")) ; } @@CODE @PATH _ROOT ################################### # Rule set 1 # # Special syntactic elements # ################################### @RULES _Ampersand <- \& [one] ###(1) _xWILD [one matches=("amp")] ###(2) \; [one] ###(3) @@ _LessThan <- \& [one] ###(1) _xWILD [one matches=("lt")] ###(1) \; [one] ###(3) @@ _GreaterThan <- \& [one] ###(1) _xWILD [one matches=("gt")] ###(2) \; [one] ###(3) @@ _APos <- \& [one] ###(1) 
_xWILD [one matches=("apos")] ###(2) \; [one] ###(3) @@ _Quote <- \& [one] ###(1) _xWILD [one matches=("quot")] ###(2) \; [one] ###(3) @@ _CommentStart <- \< [one] ### (1) \! [one] ### (2) \- [one] ### (3) \- [one] ### (4) @@ _CommentEnd <- \- [one] ### (1) \- [one] ### (2) \> [one] ### (3) @@ _DoubleHyphen <- \- [one] ### (1) \- [one] ### (2) @@ _StartXML <- \< [one] ### (1) \? [one] ### (2) _xALPHA [s one matches=("xml")] ### (3) @@ @@RULES ############################################## # Rule set 2 -- Doc Type Declaration # ############################################## @POST #get the name of the document type we're working on here #and attach that to the tag we're bulding for the doctype #statement G("buffer1") = str(0) ; G("buffer2") = str(0) ; G("ElementName") = "Elements" ; G("EntityName") = "Entities" ; if (N("$text",5)) G("buffer1") = str(N("$text",5)) ; if (N("$text",6)) G("buffer2") = str(N("$text",6)) ; if (N("$text",5) && N("$text",6)) { G("DocTypeName") = G("buffer1") + G("buffer2") ; } else if (N("$text",5)) G("DocTypeName") = G("buffer1") ; else if (N("$text",6)) G("DocTypeName") = G("buffer2") ; S("DocTypeName") = G("DocTypeName"); G("CurrentDocType") = findconcept(G("gramtab"),G("DocTypeName")) ; if (G("CurrentDocType") == 0 ) { makeconcept(G("gramtab"),G("DocTypeName")) ; G("CurrentDocType") = findconcept(G("gramtab"),G("DocTypeName")) ; } G("Entities") = findconcept(G("CurrentDocType"),G("EntityName")) ; if (G("Entities") == 0 ) { makeconcept(G("CurrentDocType"),G("EntityName")) ; G("Entities") = findconcept(G("CurrentDocType"),G("EntityName")) ; } G("Elements") = findconcept(G("CurrentDocType"),G("ElementName")) ; if (G("Elements") == 0 ) { makeconcept(G("CurrentDocType"),G("ElementName")) ; G("Elements") = findconcept(G("CurrentDocType"),G("ElementName")) ; } single() ; @@POST @RULES _StartDocType <- \< [one] ### (1) \! 
[one trig] ### (2) _xWILD [s one match=("DOCTYPE")] ### (3) _xWHITE [plus] ### (4) _xWILD [one matches=("_xALPHA" "_" ":")] ### (5) _xWILD [star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (6) @@ @@RULES ############################################## # Rule set 3 -- Signals for specially tagged # # items like processing instructions and # # comments # ############################################## @RULES _StartProcessingInstruction <- ### (5) \< [one] ### (1) \? [one trig] ### (2) @@ _EndProcessingInstruction <- ### (10) \? [one] ### (1) \> [one] ### (2) @@ _CDStart <- \< [one] ### (1) \! [one] ### (2) \[ [one] ### (3) _xALPHA [s one matches=("CDATA")] ### (4) \[ ### (5) @@ _CDEnd <- \] [one] ### (1) \] [one] ### (2) \> [one] ### (3) @@ _EndDocType <- \] [one] ### (1) _xWHITE [star] ### (2) \> [one] ### (3) @@ _EndEmptyTag <- \/ [one] ### (1) \> [one] ### (2) @@ _EndTag <- \> [one] ### (1) @@ _CharRef <- \& [one] ### (1) \# [one] ### (2) _xNUM [one] ### (3) \; [one] ### (4) @@ _CharRef <- \& [one] ### (1) \# [one] ### (2) x [one] ### (3) _xWILD [one matches=("xNUM" "A" "a" "B" "b" "C" "c" "D" "d" "E" "e" "F" "f")] ### (4) \; [one] ### (5) @@ _EntityRef <- \& [one] ### (1) _xWILD [s one matches=("_xALPHA" "_" ":")] ### (2) _xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (3) \; [one] ### (4) @@ _PEReference <- \% [one] ### (1) _xWILD [s one matches=("_xALPHA" "_" ":")] ### (2) _xWILD [s star matches=("_xALPHA" "_xNUM" "." 
"-" "_" ":")] ### (3) \; [one] ### (4) @@ @@RULES @POST #Get the name of the element we are declaring here S("buffer1") = N("$text",5) ; S("buffer2") = N("$text",6) ; if (S("buffer1") != 0 && S("buffer2") != 0 ) { S("ElementName") = S("buffer1") + S("buffer2") ; } else if (S("buffer1") !=0) S("ElementName") = S("buffer1") ; else S("ElementName") = S("buffer2") ; #record the elements we've identified as part of the DTD S("CurrentElement") = findconcept(G("Elements"),S("ElementName")) ; if (S("CurrentElement") == 0) { makeconcept(G("Elements"),S("ElementName")) ; S("CurrentElement") = findconcept(G("Elements"),S("ElementName")) ; } single() ; @@POST @RULES _ElementDeclStart <- \< [one] ### (1) \! [one] ### (2) _xWILD [s one matches=("ELEMENT")] ### (3) _xWHITE [plus] ### (4) _xWILD [s one matches=("_xALPHA" "_" ":")] ### (5) _xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (6) @@ @@RULES @RULES _NotationDeclStart <- \< [one] ### (1) \! [one] ### (2) _xWILD [s one matches=("NOTATION")] ### (3) _xWHITE [plus] ### (4) _xWILD [s one matches=("_xALPHA" "_" ":")] ### (5) _xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (6) @@ @@RULES @POST S("buffer1") = str(N("$text",5)) ; S("buffer2") = str(N("$text",6)) ; if (N("$text",5) && N("$text",6)) { S("ElementName") = S("buffer1") + S("buffer2") ; } else if (N("$text",5)) { S("ElementName") = N("$text",5) ; } else if (N("$text",6)) { S("ElementName") = N("$text",6) ; } S("CurrentElement") = findconcept(G("Elements"),S("ElementName")) ; if (S("CurrentElement") == 0) { makeconcept(G("Elements"),S("ElementName")) ; S("CurrentElement") = findconcept(G("Elements"),S("ElementName")) ; } single() ; @@POST @RULES _AttlistDeclStart <- \< [one] ### (1) \! [one] ### (2) _xWILD [s one matches=("ATTLIST")] ### (3) _xWHITE [plus] ### (4) _xWILD [s one matches=("_xALPHA" "_" ":")] ### (5) _xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (6) @@ @@RULES @RULES _EntityDeclStart <- \< [one] ### (1) \! 
[one] ### (2) _xWILD [s one matches=("ENTITY")] ### (3) _xWHITE [plus] ### (4) @@ @@RULES
# Records the number of counties for the current state: on each _numCounties
# node, adds a numeric "number" value to the state's "counties" KB concept.
@NODES _ROOT @POST L("counties") = getconcept(G("state"),"counties"); addnumval(L("counties"),"number",num(N("$text",1))); single(); @RULES _xNIL <- _numCounties ### (1) @@
# FIX: was num(N("text",1)) — the node-text variable is "$text" (as used by
# every other pass in this code base), so "text" was unset and num() got no value.
# Placeholder pass: assigns a dummy local so the @CODE region is non-empty.
@CODE L("hello") = 0; @@CODE
# Portuguese verb-feature labeling on text-zone lines: person (_pessoa),
# number (_numero), and tense/mood (_tempo) phrases such as "pretérito
# mais-que-perfeito" and "futuro do pretérito".
@PATH _ROOT _textZone _LINE @RULES _pessoa <- _xWILD [one match=(primeira segunda terceira)] @@ @RULES _numero <- _xWILD [one match=(singular plural)] @@ @RULES _tempo <- presente @@ @RULES _tempo <- futuro @@ @RULES _tempo <- afirmativo @@ @RULES _tempo <- negativo @@ @RULES _tempo <- pretérito _xWILD [one match=(imperfeito perfeito)] @@ @RULES _tempo <- pretérito mais \- que \- perfeito @@ @RULES _tempo <- futuro do presente @@ @RULES _tempo <- futuro do pretérito @@
# Full database round-trip example ending with dbclose(): open a connection,
# allocate/execute a statement, bind a varchar column, fetch rows (printing
# NULL when the result indicator is unset), then free the statement and close.
# Close the currently open database @CODE dbopen("test","root","mypassword"); dballocstmt(); dbexecstmt("SELECT * FROM table;"); dbbindcol(1,"varchar",50,&G("employee name"),&G("result1")); while (dbfetch()) {    "output.txt" << "employee name: ";    if (G("result1"))       "output.txt" << G("employee name") << "\n";    else       "output.txt" << "NULL" << "\n"; } dbfreestmt(); dbclose(); @@CODE
# Dumps the KB subtree rooted at G("currtext") (depth flag 1) for inspection.
@CODE DisplayKB(G("currtext"),1); @@CODE
# Inside a DTD mixed-content model: reduces "| name" and "| %ref;" alternatives
# to _childItem, ignores whitespace, and reports any other stray characters.
@PATH _ROOT _doctypedecl _Mixed @RULES _childItem <- _whiteSpace [opt] ### (1) \| [one] ### (2) _whiteSpace [opt] ### (3) _xWILD [s one matches=("_xALPHA" "_" ":")] ### (4) _xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (5) @@ _childItem <- _whiteSpace [opt] ### (1) \| [one] ### (2) _whiteSpace [opt] ### (3) _PEReference [one] ### (4) @@ @@RULES @POST noop() ; @@POST @RULES _xNIL <- _whiteSpace [one] ### (1) @@ @@RULES @POST "output.txt" << "Error: stray characters in PCDATA section\n"; noop() ; @@POST @RULES _xNIL <- _xWILD ### (1) @@ @@RULES
# rmchild() examples: remove a child concept by name, then by child number.
# NOTE(review): rmchild(G("myConcept"),"a concept") looks for a child of
# "a concept" named "a concept" — confirm whether findroot() was intended
# as the first argument.
G("myConcept") = makeconcept(findroot(),"a concept");
G("result") = rmchild(G("myConcept"),"a concept");

# This one does it by number:
# (FIX: this explanatory sentence was uncommented prose, and the next
# statement was missing its terminating semicolon.)
G("aParent") = makeconcept(findroot(),"a concept");
G("aChild") = makeconcept(G("aParent"),"a child");
G("result") = rmchild(G("aParent"),1);
# setlookahead() example: when "CEO" (element 3) is absent, keep only the name
# (singler(1,1)) and restart matching at the comma (element 2) so the discarded
# comma is reconsidered by later rules.
# Dynamically set the next node that the rule matcher will look at, after the current rule match is done @POST if (N(3)) single(); else { # Discard the comma match,if any. setlookahead(2); singler(1,1); } @RULES _namewtitle <- Smith \, [opt] CEO [opt] @@
# In description lines, matches "X due to Y" and records X as "disease" and Y
# as "cause" on the KB concept for the enclosing line's ICD code (X("code",2)).
@PATH _ROOT _LINE _description @POST L("con") = getconcept(G("codes"),X("code",2)); addstrval(L("con"),"disease",N("$text",1)); addstrval(L("con"),"cause",N("$text",4)); @RULES _newNode <- _xALPHA ### (1) due ### (2) to ### (3) _xALPHA ### (4) @@
# Math helper declarations: e()/pi() return the constant as a string when the
# flag is falsy, as a float otherwise; abs(); integer pow() (returns 0 for
# fractional exponents in (-1,1), a string for negative exponents); exp() via
# pow; sqrt() by Newton iteration to 1e-6; and a log() stub that only handles
# log(1)==0 (as its own comment admits).
@DECL # e to 15 decimal places e(L("flt")) { if (!L("flt")) { return "2.718281828459045"; } else { return flt("2.718281828459045"); } } # Pi to 15 decimal places pi(L("flt")) { if (!L("flt")) { return "3.141592653589793"; } else { return flt("3.141592653589793"); } } # Get absolute value abs(L("x")) { if (L("x") < 0) { return (L("x") * -1); } return L("x"); } # Integer power function # If L("str") is set and result is float, returns string pow(L("x"), L("y")) { L("i") = 0; L("result") = 1; L("negative") = 0; # Base case: x^0 = 1 if (L("y") == 0) { return L("result"); } else if (L("y") < 1 && L("y") > -1) { return 0; } if (L("y") < 0) { L("y") = L("y") * -1; L("negative") = 1; } while (L("i") < L("y")) { L("result") = L("result") * L("x"); L("i")++; } if (L("negative")) { return str(1.0 / flt(L("result"))); } else { return L("result"); } } # e^x exp(L("x")) { return pow(e(1), L("x")); } # Approximate square root to within epsilon. sqrt(L("x")) { L("epsilon") = 0.000001; L("s") = L("x"); while (abs(L("x") - (L("s") * L("s"))) > L("epsilon")) { L("s") = (L("s") + (L("x") / L("s"))) / 2.0; } return L("s"); } # Log # Currently only works for log(1) :) log(){ return 0; } @@DECL
# Flags text zones enclosed by HTML heading/title tag pairs (_title.._Etitle,
# _ha.._Eha, …) by setting N("header") on the zone; the first title text seen
# is captured into G("title").
@CODE L("hello") = 0; @@CODE @NODES _ROOT @POST if (!G("title")) G("title") = N("$treetext",2); N("header",2) = 1; # Flag header zone. @RULES _xNIL <- _title _xWILD [plus gp=_TEXTZONE] _Etitle @@ _xNIL <- _ha _xWILD [plus gp=_TEXTZONE] _Eha @@ _xNIL <- _hb _xWILD [plus gp=_TEXTZONE] _Ehb @@ _xNIL <- _hc _xWILD [plus gp=_TEXTZONE] _Ehc @@ _xNIL <- _hd _xWILD [plus gp=_TEXTZONE] _Ehd @@ _xNIL <- _he _xWILD [plus gp=_TEXTZONE] _Ehe @@ _xNIL <- _hf _xWILD [plus gp=_TEXTZONE] _Ehf @@ _xNIL <- _hx _xWILD [plus gp=_TEXTZONE] _Ehx @@
# In term entries, parses a "base=VALUE" line: stores VALUE on the match and
# on the enclosing _termEntry, then excises the "base=" prefix and the line
# terminator before reducing to _base.
@NODES _termEntry @POST S("base") = N("$text", 3); X("base") = N("$text", 3); excise(4,4); excise(1,2); single(); @RULES _base <- base ### (1) \= ### (2) _xWILD [fails=(\n \r)] ### (3) _xWILD [one matches=(\n \r)] ### (4) @@
# Fetch the first word in the KB dictionary hierarchy, then iterate with
# dictnext(), printing each entry's name to output.txt, one per line.
@CODE
L("con") = dictfirst();
while (L("con"))
{
  # FIX: conceptname() was missing its closing paren, leaving the stream
  # expression unbalanced: was `conceptname(L("con") << "\n";`.
  "output.txt" << conceptname(L("con")) << "\n";
  L("con") = dictnext(L("con"));
}
@@CODE
# FIX: this trailing description was uncommented prose after @@CODE.
# Prints a list of the dictionary entries, one per line.
# Removes whitespace tokens from lines: each _xWHITE match is excised and
# noop() prevents any reduction.
@NODES _LINE @POST excise(1,1); noop(); @RULES _xNIL <- _xWHITE [s] ### (1) @@
# Sentence-level entity rules: residences ("Eastern District of New York",
# "city, state[, country]"), county locations, dates (month day, year /
# in month year), compass regions, FBI field offices, and "assigned to the
# case" phrases. Relies on dictionary attributes (direction, region, state,
# country, date=month, day, year) tested in @PRE.
@PATH _ROOT _paragraph _sentence ############################################### # Eastern District of New York ############################################### @PRE <2,2> var("direction"); <3,3> var("region"); <5,5> var("state"); @POST S("region") = N("$text",2) + " " + N("$text",3); S("state") = N("$text",5); single(); @RULES _residentOf <- _of ### (1) _xALPHA ### (2) _xALPHA ### (3) _of ### (4) _xWILD [match=(_xALPHA _phrase)] ### (5) @@ ############################################### # Stark County Ohio ############################################### @PRE <4,4> var("state"); @POST S("count") = N("$text",4); single(); @RULES _location <- _xALPHA ### (1) county ### (2) \, [opt] ### (3) _xALPHA ### (4) @@ ############################################### # city, state, country ############################################### @PRE <6,6> var("country"); @POST S("city") = N("$text",2); S("state") = N("$text",4); S("country") = N("$text",6); single(); @RULES _residentOf <- _of ### (1) _xALPHA [plus] ### (2) \, ### (3) _xALPHA [plus] ### (4) \, ### (5) _xALPHA [s] ### (6) @@ @PRE <4,4> var("country"); @POST S("city") = N("$text",2); S("country") = N("$text",4); single(); @RULES _residentOf <- _of ### (1) _xALPHA [plus] ### (2) \, ### (3) _xALPHA [s] ### (4) @@ ############################################### # Cities and States ############################################### @PRE <4,4> var("state"); @POST S("city") = N("$text",2); S("state") = N("$text",4); if (N("all",1)) S("all") = 1; single(); @RULES _residentOf <- _of ### (1) _xWILD [min=1 max=3 match=(_xALPHA)] ### (2) \, ### (3) _xALPHA ### (4) @@ @PRE <2,2> var("state"); @POST S("state") = N("$text",2); single(); @RULES _residentOf <- _of ### (1) _xALPHA ### (2) @@ ############################################### # Dates ############################################### @PRE <2,2> vareq("date","month"); <3,3> var("year"); @RULES _date <- in ### (1) _xALPHA ### (2) _xNUM ### (3) @@ @PRE <1,1> vareq("date","month"); <2,2> 
var("day"); <4,4> var("year"); @RULES _date <- _xALPHA ### (1) _xNUM ### (2) \, [opt] ### (3) _xNUM ### (4) @@ @PRE <1,1> vareq("date","month"); @RULES _date <- _xALPHA ### (1) _xNUM ### (2) @@ ############################################### # Regions ############################################### @PRE <1,1> var("direction"); <2,2> var("region"); @RULES _region <- _xALPHA _xALPHA @@ ############################################### # FBI Washington Field Office ############################################### @PRE <2,2> var("state"); @RULES _agency <- _xWILD [match=(_agency)] ### (1) _xALPHA ### (2) field ### (3) office ### (4) @@ ############################################### # were previously assigned to the case ############################################### @RULES _assigned <- _xWILD [match=(were was previously)] ### (1) assigned ### (2) to ### (3) the ### (4) case ### (5) @@
# Dumps the knowledge base for inspection.
@CODE DispKB(); @@CODE
# Completes DTD notation handling: a _NotationDecl spans from its start marker
# to _EndTag; _NDataDecl matches "NDATA name" (name = XML NameStartChar then
# NameChars).
@NODES _ROOT @RULES _NotationDecl <- _NotationDeclStart [one] ### (1) _whiteSpace [opt] ### (2) _xWILD [min=1 max=0 fail=("_EndTag")] ### (3) _whiteSpace [opt] ### (4) _EndTag [one] ### (5) @@ _NDataDecl <- _whiteSpace [opt] ### (1) _xWILD [one matches=("NDATA")] ### (2) _whiteSpace [one] ### (3) _xWILD [s one matches=("_xALPHA" "_" ":")] ### (4) _xWILD [s star matches=("_xALPHA" "_xNUM" "." "-" "_" ":")] ### (5) @@
# Relabels the four tab-separated fields of a line as _rank, _language,
# _country, and _pop via the gp= group attribute, then excises the _sep tabs.
@NODES _LINE @POST excise(6,6); excise(4,4); excise(2,2); @RULES _xNIL <- _xWILD [fail=(_sep) gp=(_rank)] ### (1) _sep ### (2) _xWILD [fail=(_sep) gp=(_language)] ### (3) _sep ### (4) _xWILD [fail=(_sep) gp=(_country)] ### (5) _sep ### (6) _xWILD [fail=(_sep) gp=(_pop)] ### (7) @@
# Creates/fetches the "body" KB concept and points the current-part cursor at it.
@CODE G("body") = getconcept(findroot(),"body"); G("part") = G("body"); @@CODE
# Embedded-XML output pass. xmlrecursion() walks the parse tree emitting tags
# (NP/NE/VG/SENT/CLAUSE/PREP/ADVL) with attributes via printtag(), preserving
# inter-node text via printfiller(); the trailing @CODE region opens the .xml
# output file and drives the recursion from pnroot() when G("xml recursive")
# is set. First children traverse their sibling lists to limit recursion depth.
@DECL ############# # FN: XMLRECURSION # SUBJ: Traverse parse tree to output embedded tags. # NOTE: To minimize recursion, each first child will manage # its list of siblings. # Using the global G("oxml") stream for printout. ############# xmlrecursion( L("n"), # Current node. L("root"), # 1 if a "root", else 0. L("inside") # 1 if "inside" an NP, for example. ) { # Consider current node. if (!L("n")) return; L("childs") = pndown(L("n")); # Check if printable. L("name") = pnname(L("n")); L("sem") = pnvar(L("n"),"sem"); if (pnvar(L("n"),"ne")) L("ne") = 1; L("tag") = 0; #if (G("NP")) # Analyzer caller says to print out NP nodes. # { # && (L("name") == "_nps" || L("name") == "_np") ) # L("tag") = "np"; # if (L("name") == "_phone") # L("tag") = "phone"; # else if (L("name") == "_url") # L("tag") = "url"; # } #else if (G("NOUN") # && (L("name") == "_noun") ) if (L("name") == "_np" && !pnvar(L("n"),"compound-np")) { if (!L("inside")) { if (L("ne")) { L("tag") = "NE"; L("childs") = 0; } else L("tag") = "NP"; L("nowinside") = 1; # For traversing inside it. } else if (L("ne")) { L("tag") = "NE"; L("childs") = 0; } } else if (L("name") == "_vg") { if (pnvar(L("n"),"list")) L("tag") = "VGLIST"; else { L("tag") = "VG"; L("childs") = 0; } } else if (L("name") == "_verb") L("tag") = "VG"; else if (L("name") == "_sent") L("tag") = "SENT"; else if (L("name") == "_clause") L("tag") = "CLAUSE"; else if (L("name") == "_prep") { if (!L("inside")) L("tag") = "PREP"; } else if (L("name") == "_advl") # Look for location case... { if (!L("inside")) L("tag") = "ADVL"; } else if (L("ne")) { L("tag") = "NE"; L("childs") = 0; } if (L("inside")) L("nowinside") = 1; if (L("tag")) # If printable. 
{ if (!G("xml tags only")) printfiller(L("n"),G("oend"),G("oxml")); printtag(L("n"),L("tag"),G("oxml")); # G("oxml") << "<" << L("tag") << ">"; if (L("tag") == "NE") { if (G("verbose")) ljust(pnvar(L("n"),"$text"),30,"ne.txt"); if (G("verbose")) "ne.txt" << "\t" << pnvar(L("n"),"ne type") << "\n"; } # Update right edge so filler won't be printed again. G("oend") = pnvar(L("n"),"$ostart"); } # If a leaf, print it. if (!L("childs")) { if (!G("xml tags only")) { if (!L("tag")) printfiller(L("n"),G("oend"),G("oxml")); G("oxml") << xmlstr(pnvar(L("n"),"$text")); } G("oend") = pnvar(L("n"),"$oend"); } else # Else, traverse its subtree. xmlrecursion(L("childs"),0, L("nowinside")); # If printable, print its end tag. if (L("tag")) G("oxml") << "</" << L("tag") << ">"; # If first child and not a root, traverse siblings. if (!pnprev(L("n")) && !L("root")) { while (L("n") = pnnext(L("n")) ) xmlrecursion(L("n"),1,L("inside")); } } ############# ## FN: PRINTFILLER ## SUBJ: Print filler between prior and current node. ############# printfiller( L("node"), # Current node to print. L("oend"), # Right edge offset of last printed text. L("out") # Output stream. ) { L("ostart") = pnvar(L("node"),"$ostart"); if (L("oend") == L("ostart")) return; if (L("oend") && (L("ostart") - L("oend") - 1) ) { # Get snippet of input text between last and current output node. if (!(L("snip") = inputrange(L("oend")+1,L("ostart")-1) )) { if (G("error")) "err.txt" << "snip: " << L("oend") << "," << L("ostart") << "\n"; } if (L("n") = strchrcount(L("snip"),"\n")) # If snippet has a newline. { while (L("n")-- > 0) L("out") << "\n"; # Preserve newline. } else # Note: Even if there wasn't a space in the snippet, we don't know what got # excised in there, so printing space for safety. For example, if the parse excised # a paren, then no space means two alphabetics may be inappropriately glommed together. # (When excising, could flag if there is a need to separate what's left with a space...) 
L("out") << " "; # Preserve space. } } ############# ## FN: PRINTTAG ## SUBJ: Print an XML tag with attribute data. ############# printtag( L("node"), # Current node to print. L("name"), # Tag name. L("out") # Output stream. ) { if (!L("node") || !L("name") || !L("out")) return; # Error. L("out") << "<" << L("name"); # Print attributes here. L("voice") = pnvar(L("node"),"voice"); if (L("voice")) L("out") << " voice=\"" << L("voice") << "\""; L("sem") = pnvar(L("node"),"sem"); if (L("sem")) L("out") << " sem=\"" << L("sem") << "\""; L("stem") = pnvar(L("node"),"stem"); if (L("stem")) L("out") << " stem=\"" << L("stem") << "\""; L("tense") = pnvar(L("node"),"tense"); if (L("tense")) L("out") << " tense=\"" << L("tense") << "\""; L("aspect") = pnvar(L("node"),"aspect"); if (L("aspect")) L("out") << " aspect=\"" << L("aspect") << "\""; # Default to node's NE type. L("type") = pnvar(L("node"),"ne type"); # But if we resolve to a concept, use that NE type. L("kb ent") = pnvar(L("node"), "kb ent"); if (L("kb ent")) { L("con entity") = conval(L("kb ent"), "resolver"); if (L("con entity")) { L("type") = strval(L("con entity"), "ne type"); } } if (L("type")) L("out") << " type=\"" << L("type") << "\""; L("source") = pnvar(L("node"),"source"); if (L("source")) L("out") << " source=\"" << L("source") << "\""; L("date") = pnvar(L("node"),"timestamp"); if (L("date")) L("out") << " date=\"" << L("date") << "\""; L("country") = pnvar(L("node"),"country"); if (L("country")) L("out") << " country=\"" << L("country") << "\""; L("out") << ">"; } @CODE if (!G("xml recursive")) exitpass(); # Set up the recursion here. G("oend") = 0; # End offset of previously printed node. 
L("dir") = G("$apppath") + "\\data"; #L("fname") = L("dir") + "\\embed.xml"; #G("oxml") = openfile(L("fname"),"app"); L("fname") = L("dir") + "\\" + G("$inputhead") + ".xml"; G("oxml") = openfile(L("fname")); #L("dir") = G("$apppath") + "\\data\\xml_out"; #mkdir(L("dir")); #L("fname") = L("dir") + "\\" + G("$inputhead") + ".xml"; #G("oxml") = openfile(L("fname")); #G("oxml") = "embed.xml"; #xmlheader(G("oxml")); xmlstart("doc",G("oxml")); xmlshort("input",G("$inputname"),G("oxml")); xmlshort("output",G("$inputhead")+".xml",G("oxml")); xmlrecursion(pnroot(), 1, 0); xmlend("doc",G("oxml")); if (G("oxml")) closefile(G("oxml")); @@CODE

Dataset Card for "nlp_pp_code_dataset"

More information is needed for this dataset card.

Downloads last month
0
Edit dataset card

Models trained or fine-tuned on AshtonIsNotHere/nlp_pp_code_dataset