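"""Annotate keyphrases with their PRMU category: Present (P), Reordered (R),
Mixed (M) or Unseen (U), relative to the document's title and abstract.

Usage (inferred from the argv handling below; the script name is a placeholder):

    python prmu.py input.jsonl output.jsonl

Each input line is a JSON object with at least the fields 'id', 'title',
'abstract' and 'keyphrases'; each output line is the same object with an
added 'prmu' list holding one category per keyphrase.
"""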
import sys
import json

import spacy

from nltk.stem.snowball import SnowballStemmer as Stemmer

nlp = spacy.load("en_core_web_sm")

# Customize spaCy's infix patterns, following the tokenizer-customization
# example from the spaCy documentation: the default rule that splits on
# hyphens between letters is left out so that hyphenated words such as
# "mother-in-law" remain single tokens.
from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER
from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS
from spacy.util import compile_infix_regex

infixes = (
    LIST_ELLIPSES
    + LIST_ICONS
    + [
        r"(?<=[0-9])[+\-\*^](?=[0-9-])",
        r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
            al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
        ),
        r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
        # The default hyphen-splitting infix is intentionally omitted here.
        r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
    ]
)

infix_re = compile_infix_regex(infixes)
nlp.tokenizer.infix_finditer = infix_re.finditer
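
# Quick sanity check (assumed behavior, mirroring spaCy's documentation
# example for custom infixes): with the hyphen rule removed,
#   [t.text for t in nlp("mother-in-law")]
# should yield ["mother-in-law"] rather than splitting on the hyphens.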


def contains(subseq, inseq):
    """Return True if `subseq` occurs as a contiguous subsequence of `inseq`."""
    return any(inseq[pos:pos + len(subseq)] == subseq
               for pos in range(0, len(inseq) - len(subseq) + 1))
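
# For example, contains(["neural", "network"], ["a", "neural", "network"])
# is True, whereas contains(["network", "neural"], ["a", "neural", "network"])
# is False: the words occur, but not contiguously in that order.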


def find_prmu(tok_title, tok_text, tok_kp):
    """Find the PRMU category of a given keyphrase.

    P (Present): the keyphrase occurs contiguously in the title or text.
    R (Reordered): all of its words occur, but not contiguously.
    M (Mixed): only some of its words occur.
    U (Unseen): none of its words occur.
    """
    if contains(tok_kp, tok_title) or contains(tok_kp, tok_text):
        return "P"
    else:
        present_words = [w for w in tok_kp if w in tok_title or w in tok_text]

        if len(present_words) == len(tok_kp):
            return "R"
        elif len(present_words) > 0:
            return "M"
        else:
            return "U"
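
# Worked example: with tok_title = ["neural", "network", "models"] and an
# empty tok_text, ["neural", "network"] is P, ["models", "neural"] is R,
# ["neural", "pruning"] is M, and ["graph", "kernels"] is U.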


if __name__ == '__main__':

    # Create the stemmer once rather than re-instantiating it for every word.
    stemmer = Stemmer('porter')

    data = []

    with open(sys.argv[1], 'r') as f:
        for line in f:
            doc = json.loads(line.strip())

            # Tokenize the title and abstract with the customized pipeline.
            title_spacy = nlp(doc['title'])
            abstract_spacy = nlp(doc['abstract'])

            title_tokens = [token.text for token in title_spacy]
            abstract_tokens = [token.text for token in abstract_spacy]

            # Compare on lowercased stems so that inflectional variants match.
            title_stems = [stemmer.stem(w.lower()) for w in title_tokens]
            abstract_stems = [stemmer.stem(w.lower()) for w in abstract_tokens]

            keyphrases_stems = []
            for keyphrase in doc['keyphrases']:
                keyphrase_spacy = nlp(keyphrase)
                keyphrase_tokens = [token.text for token in keyphrase_spacy]
                keyphrase_stems = [stemmer.stem(w.lower()) for w in keyphrase_tokens]
                keyphrases_stems.append(keyphrase_stems)

            # One PRMU label per keyphrase, in the original order.
            prmu = [find_prmu(title_stems, abstract_stems, kp)
                    for kp in keyphrases_stems]
            doc['prmu'] = prmu

            data.append(json.dumps(doc))
            # Progress indicator.
            print(doc['id'])

    with open(sys.argv[2], 'w') as o:
        o.write("\n".join(data))
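
# A minimal input line (field names taken from the accesses above) looks like
#   {"id": "doc-1", "title": "...", "abstract": "...", "keyphrases": ["..."]}
# and the corresponding output line gains a parallel list, e.g. "prmu": ["P"].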